diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6f98693addd5cba9a40f6ab9335054951a78b2ee
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,13 @@
+# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
+version: 2.1
+# Use a package of configuration called an orb.
+orbs:
+ # Declare a dependency on the welcome-orb
+ welcome: circleci/welcome-orb@0.4.1
+# Orchestrate or schedule a set of jobs
+workflows:
+ # Name the workflow "welcome"
+ welcome:
+ # Run the welcome/run job in its own container
+ jobs:
+ - welcome/run
diff --git a/.gitignore b/.gitignore
index 1ff11080569e9312369f6e9c00463e25853fd38b..50f4251320abc80358b67eab22c02672d5f26bd6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@ build/
.vscode/
.idea/
cmake-build-debug/
+cmake-build-release/
cscope.out
.DS_Store
debug/
@@ -67,6 +68,8 @@ CMakeError.log
*.o
version.c
taos.rc
+src/connector/jdbc/.classpath
+src/connector/jdbc/.project
src/connector/jdbc/.settings/
tests/comparisonTest/cassandra/cassandratest/.classpath
tests/comparisonTest/cassandra/cassandratest/.project
diff --git a/.gitmodules b/.gitmodules
index 346f5c00699e51eac39dbfaffdbf96656052b024..a2266c46afd180b52d3aa19003380078894f6a4b 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,3 +13,6 @@
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
+[submodule "deps/TSZ"]
+ path = deps/TSZ
+ url = https://github.com/taosdata/TSZ.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1fc8d4f1b473311046f3d195fc78b9fc37344f3a..093731f190a380539cca3db8f8c12793d4b6557c 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,7 +2,7 @@ IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
- SET(PROJECT_VERSION_PATCH"${LIB_PATCH_VERSION}")
+ SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
diff --git a/Jenkinsfile b/Jenkinsfile
index 0d9c0f132ed27d371b404383d8826c8d2006aae1..80b3b63dbecf6f7df8dc12330dc8e0235503149f 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -5,7 +5,6 @@ node {
git url: 'https://github.com/taosdata/TDengine.git'
}
-
def skipbuild=0
def abortPreviousBuilds() {
@@ -114,12 +113,10 @@ def pre_test(){
pipeline {
agent none
-
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
-
stages {
stage('pre_build'){
agent{label 'master'}
@@ -158,20 +155,17 @@ pipeline {
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
-
script{
skipbuild='2'
skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
println skipbuild
-
}
sh'''
rm -rf ${WORKSPACE}.tes
'''
}
}
-
stage('Parallel test stage') {
//only build pr
when {
@@ -230,6 +224,26 @@ pipeline {
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
+ sh '''
+ rm -rf /var/lib/taos/*
+ rm -rf /var/log/taos/*
+ nohup taosd >/dev/null &
+ sleep 10
+ '''
+ sh '''
+ cd ${WKC}/tests/examples/nodejs
+ npm install td2.0-connector > /dev/null 2>&1
+ node nodejsChecker.js host=localhost
+ '''
+ sh '''
+ cd ${WKC}/tests/examples/C#/taosdemo
+ mcs -out:taosdemo *.cs > /dev/null 2>&1
+ echo '' |./taosdemo -c /etc/taos
+ '''
+ sh '''
+ cd ${WKC}/tests/gotest
+ bash batchtest.sh
+ '''
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
@@ -237,27 +251,24 @@ pipeline {
}
}
}
-
stage('test_crash_gen_s3') {
agent{label " slave3 || slave13 "}
steps {
pre_test()
- catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- timeout(time: 60, unit: 'MINUTES'){
- sh '''
- cd ${WKC}/tests/pytest
- ./crash_gen.sh -a -p -t 4 -s 2000
- '''
- }
- }
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
- rm -rf /var/lib/taos/*
- rm -rf /var/log/taos/*
- ./handle_crash_gen_val_log.sh
+ ./crash_gen.sh -a -p -t 4 -s 2000
'''
+ }
+ timeout(time: 60, unit: 'MINUTES'){
+ // sh '''
+ // cd ${WKC}/tests/pytest
+ // rm -rf /var/lib/taos/*
+ // rm -rf /var/log/taos/*
+ // ./handle_crash_gen_val_log.sh
+ // '''
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
@@ -272,11 +283,9 @@ pipeline {
./test-all.sh b2fq
date
'''
- }
-
+ }
}
}
-
stage('test_valgrind_s4') {
agent{label " slave4 || slave14 "}
@@ -441,6 +450,5 @@ pipeline {
from: "support@taosdata.com"
)
}
- }
-
-}
+ }
+}
\ No newline at end of file
diff --git a/README-CN.md b/README-CN.md
index d4c10e71d684ab5d21c1c767c398707956946232..a9bc814e8d6f6bef0ad94e29588f62e2e4c0e7f1 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture) 与 [数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。
-# 生成
+# 构建
TDengine目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
@@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
git submodule update --init --recursive
```
-## 生成 TDengine
+## 构建 TDengine
### Linux 系统
@@ -116,7 +116,13 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
-在X86-64、X86、arm64 和 arm32 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
+您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
+在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64:
@@ -130,6 +136,12 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
+mips64:
+
+```bash
+cmake .. -DCPUTYPE=mips64 && cmake --build .
+```
+
### Windows 系统
如果你使用的是 Visual Studio 2013 版本:
@@ -173,9 +185,10 @@ cmake .. && cmake --build .
# 安装
-如果你不想安装,可以直接在shell中运行。生成完成后,安装 TDengine:
+生成完成后,安装 TDengine(下文给出的指令以 Linux 为例,如果是在 Windows 下,那么对应的指令会是 `nmake install`):
+
```bash
-make install
+sudo make install
```
用户可以在[文件目录结构](https://www.taosdata.com/cn/documentation/administrator#directories)中了解更多在操作系统中生成的目录或文件。
@@ -183,7 +196,7 @@ make install
安装成功后,在终端中启动 TDengine 服务:
```bash
-taosd
+sudo systemctl start taosd
```
用户可以使用 TDengine Shell 来连接 TDengine 服务,在终端中,输入:
@@ -196,7 +209,7 @@ taos
## 快速运行
-TDengine 生成后,在终端执行以下命令:
+如果不希望以服务方式运行 TDengine,也可以在终端中直接运行它。也即在生成完成后,执行以下命令(在 Windows 下,生成的可执行文件会带有 .exe 后缀,例如会名为 taosd.exe ):
```bash
./build/bin/taosd -c test/cfg
diff --git a/README.md b/README.md
index 78f902babe240b76b82d2b77b687f0de15ff6ccd..2dea05f09d268b0d78de15ab98f3584df055c353 100644
--- a/README.md
+++ b/README.md
@@ -110,7 +110,13 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
-TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 and arm32 platform.
+You can use jemalloc as the memory allocator instead of glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
+The TDengine build script can detect the host machine's architecture on the X86-64, X86, arm64, arm32 and mips64 platforms.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
aarch64:
@@ -123,13 +129,18 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
+mips64:
+```bash
+cmake .. -DCPUTYPE=mips64 && cmake --build .
+```
+
### On Windows platform
If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
-Please specify "x86_amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
+Please specify "amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
-"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
+"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
@@ -164,7 +175,7 @@ cmake .. && cmake --build .
# Installing
-After building successfully, TDengine can be installed by:
+After building successfully, TDengine can be installed by the following command (on Windows, the corresponding command is `nmake install`):
```bash
sudo make install
```
@@ -186,7 +197,7 @@ If TDengine shell connects the server successfully, welcome messages and version
## Quick Run
-If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal:
+If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal (Linux shown here; on Windows the binary is `taosd.exe`):
```bash
./build/bin/taosd -c test/cfg
```
diff --git a/cmake/define.inc b/cmake/define.inc
index 0d2887c823a47793f1847a72fabe91ecef493781..6c466fee026097b0bdeb89c7a4fc54fc382c2726 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -41,6 +41,10 @@ IF (TD_POWER)
ADD_DEFINITIONS(-D_TD_POWER_)
ENDIF ()
+IF (TD_TQ)
+ ADD_DEFINITIONS(-D_TD_TQ_)
+ENDIF ()
+
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
@@ -79,6 +83,8 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_ARM_32)
@@ -87,6 +93,8 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
+
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_MIPS_64)
@@ -139,6 +147,7 @@ IF (TD_LINUX)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_DARWIN_64)
@@ -160,6 +169,7 @@ IF (TD_DARWIN_64)
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_WINDOWS)
@@ -174,12 +184,15 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
+
IF (TD_MEMORY_SANITIZER)
+ MESSAGE("memory sanitizer detected as true")
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
+ MESSAGE("memory sanitizer detected as false")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
- SET(RELEASE_FLAGS "/W0 /O3 /GL")
+ SET(RELEASE_FLAGS "/W0 /O2 /GL") # MSVC only support O2
ENDIF ()
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread)
@@ -187,6 +200,7 @@ IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src)
ENDIF ()
IF (TD_WINDOWS_64)
@@ -202,6 +216,10 @@ IF (TD_WINDOWS_32)
MESSAGE(STATUS "windows32 is defined")
ENDIF ()
+IF (TD_LINUX)
+ SET(COMMON_FLAGS "${COMMON_FLAGS} -pipe -Wshadow")
+ENDIF ()
+
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/os/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
diff --git a/cmake/input.inc b/cmake/input.inc
index 16ffdc2f47ff8321c4b377a06cf952373d558d55..9d716e1e7345955f7b6b844c85ace7e7bd5c6080 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -46,6 +46,9 @@ ENDIF ()
IF (${DBNAME} MATCHES "power")
SET(TD_POWER TRUE)
MESSAGE(STATUS "power is true")
+ELSEIF (${DBNAME} MATCHES "tq")
+ SET(TD_TQ TRUE)
+ MESSAGE(STATUS "tq is true")
ENDIF ()
IF (${DLLTYPE} MATCHES "go")
@@ -73,6 +76,11 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
+IF (${JEMALLOC_ENABLED} MATCHES "true")
+ SET(TD_JEMALLOC_ENABLED TRUE)
+ MESSAGE(STATUS "build with jemalloc enabled")
+ENDIF ()
+
SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
@@ -83,3 +91,12 @@ SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
ENDIF ()
+
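+# TSZ is the compression library vendored as the deps/TSZ submodule; it is
+# opt-in at configure time, e.g. `cmake .. -DTSZ_ENABLED=true` (illustrative).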
+IF (${TSZ_ENABLED} MATCHES "true")
+ # define TD_TSZ so that TSZ-specific code paths get compiled in
+ MESSAGE(STATUS "build with TSZ enabled")
+ ADD_DEFINITIONS(-DTD_TSZ)
+ set(VAR_TSZ "TSZ" CACHE INTERNAL "global variable tsz" )
+ELSE()
+ set(VAR_TSZ "" CACHE INTERNAL "global variable tsz (empty, TSZ disabled)" )
+ENDIF()
diff --git a/cmake/platform.inc b/cmake/platform.inc
index 82ff27a44dbff38b87e8304f978b8f34e204623f..a78082a1fc62a8ad66c54dcf005e3e15edf5f5f0 100755
--- a/cmake/platform.inc
+++ b/cmake/platform.inc
@@ -157,5 +157,5 @@ ELSEIF (${OSTYPE} MATCHES "Alpine")
MESSAGE(STATUS "input osType: Alpine")
SET(TD_APLHINE TRUE)
ELSE ()
- MESSAGE(STATUS "input osType unknown: " ${OSTYPE})
+ MESSAGE(STATUS "The user specified osType is unknown: " ${OSTYPE})
ENDIF ()
diff --git a/cmake/version.inc b/cmake/version.inc
index e2f2d538204da377256efc01a89bff4ae11fb4c0..30b69c2401e2facac8708fdabb83a885cb50cd31 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.20.17")
+ SET(TD_VER_NUMBER "2.2.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index eb22459d342da5f726d8688a74b4a5efde2ac5ec..516c752bd101f26f04c3986ed50edd55121c5a40 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -15,7 +15,6 @@ ADD_SUBDIRECTORY(cJson)
ADD_SUBDIRECTORY(wepoll)
ADD_SUBDIRECTORY(MsvcLibX)
ADD_SUBDIRECTORY(rmonotonic)
-
ADD_SUBDIRECTORY(lua)
IF (TD_LINUX AND TD_MQTT)
@@ -38,3 +37,7 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
BUILD_COMMAND ${MAKE}
)
ENDIF ()
+
+IF (${TSZ_ENABLED} MATCHES "true")
+ ADD_SUBDIRECTORY(TSZ)
+ENDIF()
\ No newline at end of file
diff --git a/deps/MsvcLibX/src/iconv.c b/deps/MsvcLibX/src/iconv.c
index 40b6e6462d9d0a6dc53509e8645cfba50b446256..1ec0dc73547852e37a23ff308cb740bbd88d872c 100644
--- a/deps/MsvcLibX/src/iconv.c
+++ b/deps/MsvcLibX/src/iconv.c
@@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes;
char *pBuf;
+ char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes);
if (!pBuf) {
@@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf);
return NULL;
}
- pBuf = realloc(pBuf, nBytes+1);
- return pBuf;
+ pBuf1 = realloc(pBuf, nBytes+1);
+ if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
+ return pBuf1;
}
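+
+/* A minimal sketch of the pattern applied above (illustrative helper, not
+ * part of the MsvcLibX API): keeping realloc()'s result in a second pointer
+ * preserves the original block on failure, so it can be freed rather than
+ * leaked. */
+static void *realloc_or_free(void *ptr, size_t size) {
+ void *tmp = realloc(ptr, size);
+ if (tmp == NULL && ptr != NULL) free(ptr); /* failed: release the original */
+ return tmp; /* NULL reports failure */
+}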
int CountCharacters(const char *string, UINT cp) {
diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c
index f366b081ad688e15dc62dd0c8a7ccf9bb409afe0..85f4c83f24400e12c4a4b996b863df94e07cf819 100644
--- a/deps/MsvcLibX/src/main.c
+++ b/deps/MsvcLibX/src/main.c
@@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0;
char **ppszArg;
+ char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE;
ppszArg[argc++] = pszCopy+j;
- ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ if(ppszArg1 == NULL && ppszArg != NULL)
+ free(ppszArg);
+ ppszArg = ppszArg1;
if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0';
}
@@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0';
}
- realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
+ //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */
diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c
index 5fbcf773a24d0950ac8099cd9274fcbb3157a954..e2ba755f2d8b60e545e8ead71d198e60ca7a47c0 100644
--- a/deps/MsvcLibX/src/realpath.c
+++ b/deps/MsvcLibX/src/realpath.c
@@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
int iErr;
const char *pc;
@@ -242,8 +243,12 @@
 return NULL;
 }
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
- return pOutbuf;
+ if (!outbuf) {
+ pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+ if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); /* shrink failed: avoid leaking */
+ pOutbuf = pOutbuf1; /* NULL reports the failure */
+ }
+ return pOutbuf; /* a caller-supplied buffer is returned unchanged */
}
#endif
@@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
char *pPath1 = NULL;
char *pPath2 = NULL;
int iErr;
@@ -590,10 +595,14 @@
 }
 DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
+ if (!outbuf) {
+ pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+ if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); /* shrink failed: avoid leaking */
+ pOutbuf = pOutbuf1; /* NULL reports the failure */
+ }
 free(pPath1);
 free(pPath2);
 return pOutbuf;
}
#endif /* defined(_WIN32) */
diff --git a/deps/TSZ b/deps/TSZ
new file mode 160000
index 0000000000000000000000000000000000000000..11c1060d4f917dd799ae628b131db5d6a5ef6954
--- /dev/null
+++ b/deps/TSZ
@@ -0,0 +1 @@
+Subproject commit 11c1060d4f917dd799ae628b131db5d6a5ef6954
diff --git a/deps/rmonotonic/src/monotonic.c b/deps/rmonotonic/src/monotonic.c
index 1470f91b56c79b4ee2d8429ecf58fc365d03e737..c6d2df9097ce0d435fb9dd1ec42952dd37c10de9 100644
--- a/deps/rmonotonic/src/monotonic.c
+++ b/deps/rmonotonic/src/monotonic.c
@@ -36,6 +36,15 @@ static char monotonic_info_string[32];
static long mono_ticksPerMicrosecond = 0;
+#ifdef _TD_NINGSI_60
+// the Ningsi 60 toolchain lacks the __rdtsc intrinsic: read the CPU's 64-bit time-stamp counter via inline RDTSC
+uint64_t __rdtsc(){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return ((uint64_t)hi << 32) | lo;
+}
+#endif
+
static monotime getMonotonicUs_x86() {
return __rdtsc() / mono_ticksPerMicrosecond;
}
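+
+/* Illustrative note: mono_ticksPerMicrosecond must be calibrated before the
+ * x86 path can be used, along the lines of (assuming an invariant TSC):
+ *
+ *   uint64_t t0 = __rdtsc();
+ *   usleep(100 * 1000);                                // wait 100 ms
+ *   mono_ticksPerMicrosecond = (__rdtsc() - t0) / (100 * 1000);
+ */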
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index aba10a14e327ff104eb997b1ad6af29e3de6cad1..18bdc15d30430516c3ae6c847fc448477003dd66 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -15,6 +15,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [命令行程序TAOS](/getting-started#console):访问TDengine的简便方式
* [极速体验](/getting-started#demo):运行示例程序,快速体验高效的数据插入、查询
* [支持平台列表](/getting-started#platforms):TDengine服务器和客户端支持的平台列表
+* [Kubernetes部署](https://taosdata.github.io/TDengine-Operator/zh/index.html):TDengine在Kubernetes环境进行部署的详细说明
## [整体架构](/architecture)
@@ -41,7 +42,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入
* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等
* [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等
-* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理
+* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理
* [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件
* [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码
@@ -62,7 +63,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [高级功能](/advanced-features)
* [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算
-* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):象典型的消息队列,应用可订阅接收到的最新数据
+* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据
* [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取
* [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送
@@ -80,7 +81,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [与其他工具的连接](/connections)
* [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据
-* [Matlab](/connections#matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据
+* [MATLAB](/connections#matlab):通过配置MATLAB的JDBC数据源访问保存在TDengine的数据
* [R](/connections#r):通过配置R的JDBC数据源访问保存在TDengine的数据
* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过IDEA 数据库管理工具可视化使用 TDengine
@@ -105,6 +106,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入
* [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出
* [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等
+* [性能优化](/administrator#optimize):对长期运行的系统进行维护优化,保障性能表现
* [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录
* [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表
@@ -117,9 +119,9 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## 常用工具
* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
-* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
+* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
-* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
+* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
## TDengine与其他数据库的对比测试
diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md
index 0ae2106ff2a63696dc8bbc51d25bbf5e811ef561..4263391dbcb5a02cce3af2766b60799f15d13af4 100644
--- a/documentation20/cn/01.evaluation/docs.md
+++ b/documentation20/cn/01.evaluation/docs.md
@@ -2,18 +2,18 @@
## TDengine 简介
-TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
+TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
-TDengine的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与Hadoop等典型的大数据平台相比,它具有如下鲜明的特点:
+TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点:
-* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少2万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
-* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。
-* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
-* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。
-* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
-* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
+* __10 倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
+* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。
+* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。
+* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。
+* __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark 等,BI 工具也将无缝连接。
+* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准 SQL,支持 RESTful,支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。
-采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
+采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。
图 1. TDengine技术生态图
@@ -21,42 +21,47 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
## TDengine 总体适用场景
-作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。
+作为一个 IOT 大数据平台,TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。
### 数据源特点和需求
-从数据源角度,设计人员可以从下面几个角度分析TDengine在目标应用系统里面的适用性。
+
+从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
|数据源特点和需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|总体数据量巨大| | | √ |TDengine在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
-|数据输入速度偶尔或者持续巨大| | | √ | TDengine的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
-|数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
+|总体数据量巨大| | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
+|数据输入速度偶尔或者持续巨大| | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
+|数据源数目巨大| | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
### 系统架构要求
+
|系统架构要求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
-|要求容错和高可靠| | | √ |TDengine的集群功能,自动提供容错灾备等高可靠功能。|
-|标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能,遵守标准化规范。|
+|要求简单可靠的系统架构| | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
+|要求容错和高可靠| | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。|
+|标准化规范| | | √ | TDengine 使用标准的SQL语言提供主要功能,遵守标准化规范。|
### 系统功能需求
+
|系统功能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
-|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑TDengine和关系型数据系统配合实现系统功能。|
+|要求完整的内置数据处理算法| | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
+|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。|
### 系统性能需求
+
|系统性能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
-|要求高速处理数据 | | | √ |TDengine的专门为IOT优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
-|要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。|
+|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
+|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
+|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
### 系统维护需求
+
|系统维护需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
+|要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
|要求运维学习成本可控| | | √ |同上。|
-|要求市场有大量人才储备| √ | | |TDengine作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
+|要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..30803d977704606b042c589b96b649d99a850106
--- /dev/null
+++ b/documentation20/cn/02.getting-started/01.docker/docs.md
@@ -0,0 +1,211 @@
+# 通过 Docker 快速体验 TDengine
+
+虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装和运行 TDengine。特别是,借助 Docker,能够比较方便地在 Mac OS X 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。
+
+下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
+
+## 下载 Docker
+
+Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。
+
+安装完毕后可以在命令行终端查看 Docker 版本。如果版本号正常输出,则说明 Docker 环境已经安装成功。
+
+```bash
+$ docker -v
+Docker version 20.10.5, build 55c4c88
+```
+
+## 在 Docker 容器中运行 TDengine
+
+1,使用命令拉取 TDengine 镜像,并使它在后台运行。
+
+```bash
+$ docker run -d tdengine/tdengine
+cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
+```
+
+- **docker run**:通过 Docker 运行一个容器。
+- **-d**:让容器在后台运行。
+- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
+- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。
+
+2,确认容器是否已经正确运行。
+
+```bash
+$ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS ···
+cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
+```
+
+- **docker ps**:列出所有正在运行状态的容器信息。
+- **CONTAINER ID**:容器 ID。
+- **IMAGE**:使用的镜像。
+- **COMMAND**:启动容器时运行的命令。
+- **CREATED**:容器创建时间。
+- **STATUS**:容器状态。UP 表示运行中。
+
+3,进入 Docker 容器内,使用 TDengine。
+
+```bash
+$ docker exec -it cdf548465318 /bin/bash
+root@cdf548465318:~/TDengine-server-2.0.13.0#
+```
+
+- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
+- **-i**:进入交互模式。
+- **-t**:指定一个终端。
+- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
+- **/bin/bash**:载入容器后运行 bash 来进行交互。
+
+4,进入容器后,执行 taos shell 客户端程序。
+
+```bash
+root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos>
+```
+
+TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
+
+在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](https://www.taosdata.com/cn/documentation/taos-sql)。
+
+## 通过 taosdemo 进一步了解 TDengine
+
+1,接上面的步骤,先退出 TDengine 终端程序。
+
+```bash
+taos> q
+root@cdf548465318:~/TDengine-server-2.0.13.0#
+```
+
+2,在命令行界面执行 taosdemo。
+
+```bash
+root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
+###################################################################
+# Server IP: localhost:0
+# User: root
+# Password: taosdata
+# Use metric: true
+# Datatype of Columns: int int int int int int int float
+# Binary Length(If applicable): -1
+# Number of Columns per record: 3
+# Number of Threads: 10
+# Number of Tables: 10000
+# Number of Data per Table: 100000
+# Records/Request: 1000
+# Database name: test
+# Table prefix: t
+# Delete method: 0
+# Test time: 2021-04-13 02:05:20
+###################################################################
+```
+
+回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。
+
+3,进入 TDengine 终端,查看 taosdemo 生成的数据。
+
+- **进入命令行。**
+
+```bash
+root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos>
+```
+
+- **查看数据库。**
+
+```bash
+taos> show databases;
+ name | created_time | ntables | vgroups | ···
+ test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
+ log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
+
+```
+
+- **查看超级表。**
+
+```bash
+taos> use test;
+Database changed.
+
+taos> show stables;
+ name | created_time | columns | tags | tables |
+=====================================================================================
+ meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
+Query OK, 1 row(s) in set (0.001737s)
+
+```
+
+- **查看表,限制输出十条。**
+
+```bash
+taos> select * from test.t0 limit 10;
+ ts | f1 | f2 | f3 |
+====================================================================
+ 2017-07-14 02:40:01.000 | 3 | 9 | 0 |
+ 2017-07-14 02:40:02.000 | 0 | 1 | 2 |
+ 2017-07-14 02:40:03.000 | 7 | 2 | 3 |
+ 2017-07-14 02:40:04.000 | 9 | 4 | 5 |
+ 2017-07-14 02:40:05.000 | 1 | 2 | 5 |
+ 2017-07-14 02:40:06.000 | 6 | 3 | 2 |
+ 2017-07-14 02:40:07.000 | 4 | 7 | 8 |
+ 2017-07-14 02:40:08.000 | 4 | 6 | 6 |
+ 2017-07-14 02:40:09.000 | 5 | 7 | 7 |
+ 2017-07-14 02:40:10.000 | 1 | 5 | 0 |
+Query OK, 10 row(s) in set (0.003638s)
+
+```
+
+- **查看 t0 表的标签值。**
+
+```bash
+taos> select areaid, loc from test.t0;
+ areaid | loc |
+===========================
+ 10 | shanghai |
+Query OK, 1 row(s) in set (0.002904s)
+
+```
+
+## 停止正在 Docker 中运行的 TDengine 服务
+
+```bash
+$ docker stop cdf548465318
+cdf548465318
+```
+
+- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
+- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。
+
+## 编程开发时连接在 Docker 中的 TDengine
+
+从 Docker 之外连接使用在 Docker 容器内运行的 TDengine 服务,有以下两个思路:
+
+1,通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。
+
+```bash
+$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
+526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
+
+$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
+```
+
+- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
+- 第二条命令,通过 RESTful 接口访问 TDengine,这时连接的是本机的 6041 端口,可见连接成功。
+
+注意:在这个示例中,出于方便性考虑,只映射了 RESTful 需要的 6041 端口。如果希望以非 RESTful 方式连接 TDengine 服务,则需要映射从 6030 开始的共 11 个端口(完整的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))。在例子中,挂载本地目录也只是处理了配置文件所在的 /etc/taos 目录,而没有挂载数据存储目录。
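+
+For example, to expose the native connection ports as well as the RESTful one (an illustrative command; adjust ranges and mounts to your setup): `docker run -d -v /etc/taos:/etc/taos -p 6030-6040:6030-6040 -p 6030-6040:6030-6040/udp -p 6041:6041 tdengine/tdengine`.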
+
+2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
+
+```bash
+$ docker exec -it 526aa188da /bin/bash
+```
+
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index b46322cef28c7c3e78c260680dcc501684e6844a..ab10b28fd3950bfa10e47113696de0829b2da74d 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -2,27 +2,29 @@
## 快捷安装
-TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。
+TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。
### 通过源码安装
-请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装.
+请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码并安装.
-### 通过Docker容器运行
+### 通过 Docker 容器运行
-请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html)
+暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。
+
+详细操作方法请参照 [通过 Docker 快速体验 TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。
### 通过安装包安装
-TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
+TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择:
-安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
+安装包下载在 [这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。
-具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
+具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
## 轻松启动
-安装成功后,用户可使用`systemctl`命令来启动TDengine的服务进程。
+安装成功后,用户可使用 `systemctl` 命令来启动 TDengine 的服务进程。
```bash
$ systemctl start taosd
@@ -33,38 +35,39 @@ $ systemctl start taosd
$ systemctl status taosd
```
-如果TDengine服务正常工作,那么您可以通过TDengine的命令行程序`taos`来访问并体验TDengine。
+如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
**注意:**
-- systemctl命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo
-- 为更好的获得产品反馈,改善产品,TDengine会采集基本的使用信息,但您可以修改系统配置文件taos.cfg里的配置参数telemetryReporting, 将其设为0,就可将其关闭。
-- TDengine采用FQDN(一般就是hostname)作为节点的ID,为保证正常运行,需要给运行taosd的服务器配置好hostname,在客户端应用运行的机器配置好DNS服务或hosts文件,保证FQDN能够解析。
+- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo。
+- 为更好的获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting, 将其设为 0,就可将其关闭。
+- TDengine 采用 FQDN (一般就是 hostname )作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 hostname,在客户端应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。
+- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
-* TDengine 支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装,用`which systemctl`命令来检测系统中是否存在`systemd`包:
+* TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
```bash
$ which systemctl
```
- 如果系统中不支持systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
+ 如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
-## TDengine命令行程序
+## TDengine 命令行程序
-执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。
+执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。
```bash
$ taos
```
-如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下:
+如果 TDengine 终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。TDengine 终端的提示符号如下:
```cmd
taos>
```
-在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:
+在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
```mysql
create database demo;
@@ -73,24 +76,24 @@ create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
- ts | speed |
-===================================
- 19-07-15 00:00:00.000| 10|
- 19-07-15 01:00:00.000| 20|
-Query OK, 2 row(s) in set (0.001700s)
+ ts | speed |
+========================================
+ 2019-07-15 00:00:00.000 | 10 |
+ 2019-07-15 01:00:00.000 | 20 |
+Query OK, 2 row(s) in set (0.003128s)
```
-除执行SQL语句外,系统管理员还可以从TDengine终端检查系统运行状态,添加删除用户账号等。
+除执行 SQL 语句外,系统管理员还可以从 TDengine 终端检查系统运行状态,添加删除用户账号等。
### 命令行参数
-您可通过配置命令行参数来改变TDengine终端的行为。以下为常用的几个命令行参数:
+您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数:
-- -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_
-- -h, --host: 指定服务的IP地址,默认为本地服务
-- -s, --commands: 在不进入终端的情况下运行TDengine命令
-- -u, -- user: 连接TDengine服务器的用户名,缺省为root
-- -p, --password: 连接TDengine服务器的密码,缺省为taosdata
+- -c, --config-dir: 指定配置文件目录,默认为 _/etc/taos_
+- -h, --host: 指定服务的 FQDN 地址(也可以使用 IP),默认为连接本地服务
+- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
+- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root
+- -p, --password: 连接 TDengine 服务器的密码,缺省为 taosdata
- -?, --help: 打印出所有命令行参数
示例:
@@ -99,7 +102,7 @@ Query OK, 2 row(s) in set (0.001700s)
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
-### 运行SQL命令脚本
+### 运行 SQL 命令脚本
TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
@@ -107,27 +110,27 @@ TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
taos> source <filename>;
```
-### Shell小技巧
+### Shell 小技巧
- 可以使用上下光标键查看历史输入的指令
-- 修改用户密码。在 shell 中使用 alter user 指令
+- 修改用户密码,在 shell 中使用 alter user 指令
- ctrl+c 中止正在进行中的查询
- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema
## TDengine 极速体验
-启动TDengine的服务,在Linux终端执行taosdemo
+启动 TDengine 的服务,在 Linux 终端执行 taosdemo
```bash
$ taosdemo
```
-该命令将在数据库test下面自动创建一张超级表meters,该超级表下有1万张表,表名为"t0" 到"t9999",每张表有10万条记录,每条记录有 (f1, f2, f3)三个字段,时间戳从"2017-07-14 10:40:00 000" 到"2017-07-14 10:41:39 999",每张表带有标签areaid和loc, areaid被设置为1到10, loc被设置为"beijing"或者“shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
-执行这条命令大概需要10分钟,最后共插入10亿条记录。
+执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
-在TDengine客户端输入查询命令,体验查询速度。
+在 TDengine 客户端输入查询命令,体验查询速度。
- 查询超级表下记录总条数:
@@ -135,72 +138,64 @@ $ taosdemo
taos> select count(*) from test.meters;
```
-- 查询10亿条记录的平均值、最大值、最小值等:
+- 查询 1 亿条记录的平均值、最大值、最小值等:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.meters;
+taos> select avg(current), max(voltage), min(phase) from test.meters;
```
-- 查询loc="beijing"的记录总条数:
+- 查询 location="beijing" 的记录总条数:
```mysql
-taos> select count(*) from test.meters where loc="beijing";
+taos> select count(*) from test.meters where location="beijing";
```
-- 查询areaid=10的所有记录的平均值、最大值、最小值等:
+- 查询 groupdId=10 的所有记录的平均值、最大值、最小值等:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
```
-- 对表t10按10s进行平均值、最大值和最小值聚合统计:
+- 对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
```mysql
-taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
+taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```
-**Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。
+**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
## 客户端和报警模块
-如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下:
-
-- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
-- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
-- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
-
-报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
+如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
-- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
+报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
## 支持平台列表
-### TDengine服务器支持的平台列表
+### TDengine 服务器支持的平台列表
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** |
| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- |
| X64 | ● | ● | | ○ | ● | ● | ● |
-| 树莓派 ARM32 | | ● | ● | | | | |
| 龙芯 MIPS64 | | | ● | | | | |
-| 鲲鹏 ARM64 | | ○ | ○ | | ● | | |
-| 申威 Alpha64 | | | ○ | ● | | | |
+| 鲲鹏 ARM64 | | ○ | ○ | | ● | | |
+| 申威 Alpha64 | | | ○ | ● | | | |
| 飞腾 ARM64 | | ○ 优麒麟 | | | | | |
| 海光 X64 | ● | ● | ● | ○ | ● | ● | |
-| 瑞芯微 ARM64/32 | | | ○ | | | | |
-| 全志 ARM64/32 | | | ○ | | | | |
-| 炬力 ARM64/32 | | | ○ | | | | |
-| TI ARM32 | | | ○ | | | | |
-| 华为云 ARM64 | | | | | | | ● |
+| 瑞芯微 ARM64 | | | ○ | | | | |
+| 全志 ARM64 | | | ○ | | | | |
+| 炬力 ARM64 | | | ○ | | | | |
+| 华为云 ARM64 | | | | | | | ● |
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
-### TDengine客户端和连接器支持的平台列表
+### TDengine 客户端和连接器支持的平台列表
-目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。
+目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。
对照矩阵如下:
@@ -217,5 +212,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
-请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。
+请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。
diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md
index 8e1b1e3ab1513fbeaa5b9b805263485a13483b9b..27ac7f123cdd2a56df9e65ae0fa13d1ff8faa23d 100644
--- a/documentation20/cn/03.architecture/02.replica/docs.md
+++ b/documentation20/cn/03.architecture/02.replica/docs.md
@@ -107,11 +107,11 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
-1. 应用对写请求做基本的合法性检查,通过,则给改请求包打上一个版本号(version, 单调递增)
+1. 应用对写请求做基本的合法性检查,通过,则给该请求包打上一个版本号(version, 单调递增)
2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log)
-3. 应用调用API syncForwardToPeer,如多vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
+3. 应用调用API syncForwardToPeer,如果vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理
-5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。
+5. vnode B应用在写入成功后,都需要调用syncConfirmForward通知sync模块已经写入成功。
6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。
7. 如果quorum大于1,vnode A需要等待vnode B或其他副本对Forward消息的确认。
8. 如果quorum大于1,vnode A收到quorum-1条确认消息后,调用回调函数confirmForward,通知应用写入成功。
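+
+A compact sketch of the quorum bookkeeping described in steps 6-8 (illustrative C-style pseudocode, not the actual sync module; `wait_forward_rsp` is a hypothetical blocking helper):
+
+```c
+// vnode A (master), after forwarding the versioned WAL record to its slaves
+static void wait_for_quorum(uint64_t version, int quorum) {
+  int confirms = 1;                 // the master's own local write counts once
+  while (confirms < quorum) {
+    wait_forward_rsp(version);      // returns when one Forward Response arrives
+    confirms++;
+  }
+  confirmForward(version);          // step 8: notify the application layer
+}
+```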
@@ -140,7 +140,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下:
(图:数据恢复流程)
1. 通过已经建立的TCP连接,发送sync req给master节点
2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd)
@@ -219,7 +219,7 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
不同之处:
-- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request, 如果超过半数回答Yes, 这个candidate就成为Leader,开始一个新的term. 而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组类传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会给节点组内其他节点进行广播的。Raft里不存在这样的机制,因此需要投票来解决。
+- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request,如果超过半数回答Yes,这个candidate就成为Leader,开始一个新的term。而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组内传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会向节点组内其他节点进行广播。Raft里不存在这样的机制,因此需要投票来解决。
- 对WAL的一条记录,Raft用term + index来做唯一标识。但TDengine只用version(类似index),在TDengine实现里,仅仅用version是完全可行的, 因为TDengine的选举机制,没有term的概念。
如果整个虚拟节点组全部宕机,重启,但不是所有虚拟节点都上线,这个时候TDengine是不会选出master的,因为未上线的节点有可能有最高version的数据。而RAFT协议,只要超过半数上线,就会选出Leader。
diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md
index 87553fa8ad9760ecdb6d1667823d336189542331..b481bea9f840ad459812f955aa76a8a7829d5b37 100644
--- a/documentation20/cn/03.architecture/docs.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -176,9 +176,9 @@ TDengine 分布式架构的逻辑结构图如下:
**通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。
-**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。
+**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。另外,这个参数值的长度需要控制在 96 个字符以内。
-**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。
+**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
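+
+For example, with the default `serverPort` of 6030 this works out to TCP/UDP ports 6030-6040 for taosd itself, with the RESTful interface conventionally on 6041 and the Arbitrator on 6042 (an illustrative tally; the linked port documentation is authoritative).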
**集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。
@@ -323,8 +323,6 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。
-注:vnode之间的同步复制仅仅企业版支持
-
## 缓存与持久化
### 缓存
@@ -343,7 +341,7 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久
对于采集的数据,一般有保留时长,这个时长由系统配置参数keep决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。
-给定days与keep两个参数,一个vnode总的数据文件数为:keep/days。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。 目前的版本,参数keep可以修改,但对于参数days,一但设置后,不可修改。
+给定days与keep两个参数,一个典型工作状态的vnode中总的数据文件数为:`向上取整(keep/days)+1`个。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。 目前的版本,参数keep可以修改,但对于参数days,一但设置后,不可修改。
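+
+As a worked example of the formula above: with `days=10` and `keep=100`, a steady-state vnode holds ceil(100/10)+1 = 11 data files, which falls inside the recommended 10-100 range.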
在每个数据文件里,一张表的数据是一块一块存储的。一张表可以有一到多个数据文件块。在一个文件块里,数据是列式存储的,占用的是一片连续的存储空间,这样大大提高读取速度。文件块的大小由系统参数maxRows(每块最大记录条数)决定,缺省值为4096。这个值不宜过大,也不宜过小。过大,定位具体时间段的数据的搜索时间会变长,影响读取速度;过小,数据块的索引太大,压缩效率偏低,也影响读取速度。
diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md
index a161778a72728ca05a75538c8b04ca0277e01bb2..5557134aac23b4f69066c9fb41aaa51972fcbba3 100644
--- a/documentation20/cn/06.queries/docs.md
+++ b/documentation20/cn/06.queries/docs.md
@@ -12,7 +12,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, G
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
-例如:在TAOS Shell中,从表d1001中查询出vlotage > 215的记录,按时间降序排列,仅仅输出2条。
+例如:在TAOS Shell中,从表d1001中查询出voltage > 215的记录,按时间降序排列,仅仅输出2条。
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
diff --git a/documentation20/cn/07.advanced-features/docs.md b/documentation20/cn/07.advanced-features/docs.md
index 650a2ca96b759bd6b8123dbb64023496b654dcd0..1077f299ee2a2e93589d0246af7633a6886c6756 100644
--- a/documentation20/cn/07.advanced-features/docs.md
+++ b/documentation20/cn/07.advanced-features/docs.md
@@ -120,7 +120,7 @@ if (async) {
}
```
-TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。
+TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。)
参数`taos`是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在API的内部线程中被调用,而TDengine的部分API不是线程安全的。
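+
+A minimal callback sketch for the asynchronous mode (illustrative; per the note above it only hands rows off to a worker, and `enqueue_for_worker` is a hypothetical helper):
+
+```c
+void subscribe_callback(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code) {
+  if (code != 0) return;                  // subscription-level error, nothing to read
+  TAOS_ROW row;
+  while ((row = taos_fetch_row(res)) != NULL) {
+    enqueue_for_worker(row);              // keep the callback itself cheap
+  }
+}
+```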
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 3442a2248cd3743cc93034fb5aa9d13b96079543..511bab8a605ce666d263d609d1599e30c85d78c4 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query)。
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
@@ -50,6 +49,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
+注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
## 如何获取 taos-jdbcdriver
@@ -267,7 +267,9 @@ while(resultSet.next()){
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
### 处理异常
+
在报错后,通过SQLException可以获取到错误的信息和错误码:
+
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
@@ -280,11 +282,90 @@ try (Statement statement = connection.createStatement()) {
e.printStackTrace();
}
```
+
JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。
具体的错误码请参考:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
+### 通过参数绑定写入数据
+
+从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。)
+
+```java
+Statement stmt = conn.createStatement();
+Random r = new Random();
+
+// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值:
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
+
+// 设定数据表名:
+s.setTableName("w1");
+// 设定 TAGS 取值:
+s.setTagInt(0, r.nextInt(10));
+s.setTagString(1, "Beijing");
+
+int numOfRows = 10;
+
+// VALUES 部分以逐列的方式进行设置:
+ArrayList<Long> ts = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+
+ArrayList<Integer> s1 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s1.add(r.nextInt(100));
+}
+s.setInt(1, s1);
+
+ArrayList<String> s2 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s2.add("test" + r.nextInt(100));
+}
+s.setString(2, s2, 10);
+
+// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据:
+s.columnDataAddBatch();
+// 执行绑定数据后的语句:
+s.columnDataExecuteBatch();
+// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值):
+s.columnDataClearBatch();
+// 执行完毕,释放资源:
+s.columnDataCloseBatch();
+```
+
+用于设定 TAGS 取值的方法总共有:
+```java
+public void setTagNull(int index, int type)
+public void setTagBoolean(int index, boolean value)
+public void setTagInt(int index, int value)
+public void setTagByte(int index, byte value)
+public void setTagShort(int index, short value)
+public void setTagLong(int index, long value)
+public void setTagTimestamp(int index, long value)
+public void setTagFloat(int index, float value)
+public void setTagDouble(int index, double value)
+public void setTagString(int index, String value)
+public void setTagNString(int index, String value)
+```
+
+用于设定 VALUES 数据列的取值的方法总共有:
+```java
+public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
+public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
+public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
+public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
+public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
+public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
+public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+```
+其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
+
### 订阅
#### 创建
@@ -447,12 +528,13 @@ Query OK, 1 row(s) in set (0.000141s)
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
-| 2.0.22 | 2.0.18.0 及以上 | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x |
+| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
@@ -471,7 +553,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
-| SMALLINT | java.lang.Short |
+| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 5a6c26587a025d73711e91263010887ac3e027b4..3a6e884f56addc7d2d4ccacad57ef3baa6844a4b 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Linux**
-**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载**
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
@@ -56,7 +56,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
*taos.tar.gz*:应用驱动安装包
*driver*:TDengine应用驱动driver
*connector*: 各种编程语言连接器(go/grafanaplugin/nodejs/python/JDBC)
- *examples*: 各种编程语言的示例程序(c/C#/go/JDBC/matlab/python/R)
+ *examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R)
运行install_client.sh进行安装
@@ -68,7 +68,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Windows x64/x86**
-**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载 :**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载 :**
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
@@ -213,7 +213,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `int taos_result_precision(TAOS_RES *res)`
- 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。
+ 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒。
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
@@ -259,7 +259,7 @@ typedef struct taosField {
获取最近一次API调用失败的原因,返回值为字符串。
-- `char *taos_errno(TAOS_RES *res)`
+- `int taos_errno(TAOS_RES *res)`
获取最近一次API调用失败的原因,返回值为错误代码。
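+
+A minimal sketch combining the two calls above (illustrative; assumes an established connection `taos`):
+
+```c
+TAOS_RES *res = taos_query(taos, "select count(*) from test.meters");
+if (taos_errno(res) != 0) {
+  fprintf(stderr, "query failed: %s\n", taos_errstr(res));
+} else {
+  // iterate the rows here with taos_fetch_row(res)
+}
+taos_free_result(res);
+```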
@@ -291,9 +291,27 @@ typedef struct taosField {
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
-### 参数绑定API
+
+### 参数绑定 API
-除了直接调用 `taos_query` 进行查询,TDengine也提供了支持参数绑定的Prepare API,与 MySQL 一样,这些API目前也仅支持用问号`?`来代表待绑定的参数,具体如下:
+除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。
+
+从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下:
+1. 调用 `taos_stmt_init` 创建参数绑定对象;
+2. 调用 `taos_stmt_prepare` 解析 INSERT 语句;
+3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名;
+4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值;
+5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值;
+6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理;
+7. 可以重复第 3~6 步,为批处理加入更多的数据行;
+8. 调用 `taos_stmt_execute` 执行已经准备好的批处理指令;
+9. 执行完毕,调用 `taos_stmt_close` 释放所有资源。
+
+说明:如果 `taos_stmt_execute` 执行成功,且不需要改变 SQL 语句,那么可以复用 `taos_stmt_prepare` 的解析结果,直接进行第 3~6 步绑定新数据。但如果执行出错,则不建议在当前的环境上下文中继续工作,而应释放资源,从 `taos_stmt_init` 步骤重新开始。
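+
+为便于理解,下面给出一个覆盖上述第 1~9 步的示意性 C 代码骨架。示例假设超级表 meters 的结构为 ts TIMESTAMP、current FLOAT,且只有一个 INT 类型的标签(表名、表结构与取值均为假设);错误处理从简,仅用于演示调用顺序,并非权威实现(其中 `taos_stmt_errstr` 需要 2.1.3.0 及以上版本):
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <taos.h>
+
+void stmt_insert_demo(TAOS *taos) {
+  TAOS_STMT *stmt = taos_stmt_init(taos);  // 步骤 1:创建参数绑定对象
+
+  // 步骤 2:解析 INSERT 语句,表名与 TAGS 均用 ? 占位(自动建表写法)
+  const char *sql = "INSERT INTO ? USING meters TAGS(?) VALUES(?, ?)";
+  if (taos_stmt_prepare(stmt, sql, 0) != 0) {
+    printf("prepare failed: %s\n", taos_stmt_errstr(stmt));
+    taos_stmt_close(stmt);
+    return;
+  }
+
+  // 步骤 4:绑定子表名和 TAGS 取值(本例只有一个 INT 标签)
+  int group_id = 1;
+  TAOS_BIND tag;
+  memset(&tag, 0, sizeof(tag));
+  tag.buffer_type   = TSDB_DATA_TYPE_INT;
+  tag.buffer        = &group_id;
+  tag.buffer_length = sizeof(group_id);
+  taos_stmt_set_tbname_tags(stmt, "d1001", &tag);
+
+  // 步骤 5:以多列的方式绑定两行 VALUES 数据
+  int64_t ts[2]      = {1626861392589LL, 1626861392590LL};
+  float   current[2] = {10.3f, 10.5f};
+  char    is_null[2] = {0, 0};
+  TAOS_MULTI_BIND cols[2];
+  memset(cols, 0, sizeof(cols));
+  cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
+  cols[0].buffer        = ts;
+  cols[0].buffer_length = sizeof(int64_t);
+  cols[0].is_null       = is_null;
+  cols[0].num           = 2;
+  cols[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
+  cols[1].buffer        = current;
+  cols[1].buffer_length = sizeof(float);
+  cols[1].is_null       = is_null;
+  cols[1].num           = 2;
+  taos_stmt_bind_param_batch(stmt, cols);
+
+  taos_stmt_add_batch(stmt);  // 步骤 6:把当前绑定的参数加入批处理
+  // 步骤 7:如有更多子表或数据行,可在此重复第 3~6 步
+
+  if (taos_stmt_execute(stmt) != 0) {  // 步骤 8:执行批处理
+    printf("execute failed: %s\n", taos_stmt_errstr(stmt));
+  }
+  taos_stmt_close(stmt);  // 步骤 9:释放所有资源
+}
+```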
+
+除 C/C++ 语言外,TDengine 的 Java 语言 JNI Connector 也提供参数绑定接口支持,具体请另外参见:[参数绑定接口的 Java 用法](https://www.taosdata.com/cn/documentation/connector/java#stmt-java)。
+
+接口相关的具体函数如下(也可以参考 [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) 文件中使用对应函数的方式):
- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
@@ -301,11 +319,12 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
- 解析一条sql语句,将解析结果和参数信息绑定到stmt上,如果参数length大于0,将使用此参数作为sql语句的长度,如等于0,将自动判断sql语句的长度。
+ 解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
- 进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下:
+ 进行单行参数绑定。它不如 `taos_stmt_bind_param_batch` 效率高,但可以支持非 INSERT 类型的 SQL 语句。
+ bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 一致,具体定义如下:
```c
typedef struct TAOS_BIND {
@@ -319,9 +338,35 @@ typedef struct TAOS_BIND {
} TAOS_BIND;
```
+- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
+
+ (2.1.1.0 版本新增,仅可用于替换 INSERT 语句中的参数值)
+ 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
+
+- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
+
+ (2.1.2.0 版本新增,仅可用于替换 INSERT 语句中的参数值)
+ 当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。tags 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。
+
+- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)`
+
+ (2.1.1.0 版本新增,仅可用于替换 INSERT 语句中的参数值)
+ 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下:
+
+```c
+typedef struct TAOS_MULTI_BIND {
+ int buffer_type;
+ void * buffer;
+ uintptr_t buffer_length;
+ int32_t * length;
+ char * is_null;
+ int num; // 列的个数,即 buffer 中的参数个数
+} TAOS_MULTI_BIND;
+```
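+
+作为补充,下面给出一个为变长类型(BINARY)列填充 TAOS_MULTI_BIND 的示意片段,其中列宽 binary(8)、行数和取值均为假设;与定长类型不同,变长类型必须提供 length 数组:
+
+```c
+#include <taos.h>
+
+// 示意:为一个 binary(8) 列构造两行待绑定数据(第二行写入 NULL)
+void fill_binary_column(TAOS_MULTI_BIND *b) {
+  static char    payload[2][8] = {"abc", "de"};  // 每行缓冲区宽度与列定义一致
+  static int32_t lengths[2]    = {3, 2};         // 每行字符串的实际字节数
+  static char    nulls[2]      = {0, 1};         // 1 表示该行为 NULL
+  b->buffer_type   = TSDB_DATA_TYPE_BINARY;
+  b->buffer        = payload;
+  b->buffer_length = 8;        // 单行缓冲区的宽度(字节)
+  b->length        = lengths;
+  b->is_null       = nulls;
+  b->num           = 2;        // 本列共 2 行取值
+}
+```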
+
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
- 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用`taos_stmt_bind_param`绑定新的参数。需要注意,此函数仅支持 insert/import 语句,如果是select等其他SQL语句,将返回错误。
+ 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param` 或 `taos_stmt_bind_param_batch` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。
- `int taos_stmt_execute(TAOS_STMT *stmt)`
@@ -329,12 +374,17 @@ typedef struct TAOS_BIND {
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
- 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result`以释放资源。
+ 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result` 以释放资源。
- `int taos_stmt_close(TAOS_STMT *stmt)`
执行完毕,释放所有资源。
+- `char * taos_stmt_errstr(TAOS_STMT *stmt)`
+
+ (2.1.3.0 版本新增)
+ 用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。
+
### 连续查询接口
TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下:
@@ -345,11 +395,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* taos:已经建立好的数据库连接
* sql:SQL查询语句(仅能使用查询语句)
* fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。
- * stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)
+ * stime:是流式计算开始的时间。如果是“64位整数最小值”(即 C 语言中的 INT64_MIN),表示从现在开始;如果不为“64位整数最小值”,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)。
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
- 返回值为NULL,表示创建成功,返回值不为空,表示成功。
+ 返回值为NULL,表示创建失败;返回值不为空,表示成功。
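+
+ 下面是一个打开连续查询的示意片段,其中的 SQL 语句与回调逻辑均为假设,仅用于演示参数的传递方式:
+
+```c
+#include <stdint.h>
+#include <stdio.h>
+#include <taos.h>
+
+// 回调:每次流式计算完成后,把一行结果格式化打印出来
+void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
+  char buf[512] = {0};
+  taos_print_row(buf, row, taos_fetch_fields(res), taos_num_fields(res));
+  printf("stream result: %s\n", buf);
+}
+
+void open_stream_demo(TAOS *taos) {
+  // stime 传入 64 位整数最小值(INT64_MIN),表示从现在开始计算
+  TAOS_STREAM *stream = taos_open_stream(
+      taos, "select avg(current) from meters interval(10s)",
+      stream_cb, INT64_MIN, NULL, NULL);
+  if (stream == NULL) {
+    printf("open stream failed\n");
+    return;
+  }
+  // ...应用退出前调用 taos_close_stream(stream) 关闭流
+}
+```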
- `void taos_close_stream (TAOS_STREAM *tstr)`
@@ -377,11 +427,15 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* res:查询结果集,注意结果集中可能没有记录
* param:调用 `taos_subscribe`时客户程序提供的附加参数
* code:错误码
+
+ **注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。
+ **注意**:在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。
+
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
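+
+ 结合上面几个函数,下面给出一个同步模式下订阅并轮询消费的示意片段(topic 名称、SQL 及消费轮数均为假设,错误处理从简):
+
+```c
+#include <taos.h>
+
+void subscribe_demo(TAOS *taos) {
+  // 同步模式:回调 fp 传 NULL;interval 为轮询周期(单位毫秒)
+  TAOS_SUB *tsub = taos_subscribe(taos, 1 /* restart */, "demo_topic",
+                                  "select * from meters", NULL, NULL, 1000);
+  if (tsub == NULL) return;
+
+  for (int i = 0; i < 10; i++) {         // 示意:仅消费 10 轮
+    TAOS_RES *res = taos_consume(tsub);  // 可能阻塞到轮询周期到达
+    if (res == NULL) break;              // 返回 NULL 说明系统出错
+    TAOS_ROW row;
+    while ((row = taos_fetch_row(res)) != NULL) {
+      // 按上文建议,尽快取走并处理数据,避免服务端持续缓存
+    }
+  }
+  taos_unsubscribe(tsub, 1);  // keepProgress 为 1:保留进度,便于断点续传
+}
+```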
@@ -503,6 +557,13 @@ c1.close()
conn.close()
```
+#### 关于纳秒 (nanosecond) 在 Python 连接器中的说明
+
+由于目前 Python 对 nanosecond 的支持尚不完善(参见下面的链接 1 和链接 2),当前的实现方式是在 nanosecond 精度时返回整数,而不是像 ms 和 us 精度时那样返回 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持纳秒,涛思数据可能会修改相关接口。
+
+1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
+2. https://www.python.org/dev/peps/pep-0564/
+
#### 帮助信息
用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法:
@@ -515,7 +576,7 @@ conn.close()
- _TDengineCursor_ 类
参考python中help(taos.TDengineCursor)。
- 这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能夸线程共享使用,否则会导致返回结果出现错误。
+ 这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
- _connect_ 方法
@@ -534,7 +595,9 @@ conn.close()
## RESTful Connector
-为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
+为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
+
+注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。
### HTTP请求格式
@@ -738,7 +801,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效
-- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041
+- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)
- httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整)
- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
@@ -752,7 +815,7 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
* .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
-* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。
+* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。
### 安装验证
@@ -844,11 +907,15 @@ go env -w GOPROXY=https://goproxy.io,direct
sql.Open内置的方法,Close closes the statement.
+### 其他代码示例
+
+[Consume Messages from Kafka](https://github.com/taosdata/go-demo-kafka) 是一个通过 Go 语言实现消费 Kafka 队列写入 TDengine 的示例程序,也可以作为通过 Go 连接 TDengine 的写法参考。
+
## Node.js Connector
Node.js连接器支持的系统有:
-| **CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
+|**CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 79380f3bbd9680120f63f89a0bfbe6f31f5c7a74..b47f297ae0a68c91e5d38aad000acdb14591283d 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -16,7 +16,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin
以CentOS 7.2操作系统为例,将grafanaplugin目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
+sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### 使用 Grafana
@@ -75,50 +75,45 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine

-## Matlab
+## MATLAB
-MatLab可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。
+MATLAB 可以通过安装包内提供的 JDBC Driver 直接连接到 TDengine 获取数据到本地工作空间。
-### MatLab的JDBC接口适配
+### MATLAB 的 JDBC 接口适配
-MatLab的适配有下面几个步骤,下面以Windows10上适配MatLab2017a为例:
+MATLAB 的适配有下面几个步骤,以 Windows 10 上适配 MATLAB 2021a 为例:
-- 将TDengine安装包内的驱动程序JDBCDriver-1.0.0-dist.jar拷贝到${matlab_root}\MATLAB\R2017a\java\jar\toolbox
-- 将TDengine安装包内的taos.lib文件拷贝至${matlab_ root _dir}\MATLAB\R2017a\lib\win64
-- 将新添加的驱动jar包加入MatLab的classpath。在${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt文件中添加下面一行
-
+- 将 TDengine 客户端安装路径下 `\TDengine\connector\jdbc` 目录中的驱动程序 `taos-jdbcdriver-2.0.25-dist.jar` 拷贝到 `${matlab_root}\MATLAB\R2021a\java\jar\toolbox`。
+- 将 TDengine 安装包内的 `taos.lib` 文件拷贝至 `${matlab_root_dir}\MATLAB\R2021a\lib\win64`。
+- 将新添加的驱动 jar 包加入 MATLAB 的 classpath。在 `${matlab_root_dir}\MATLAB\R2021a\toolbox\local\classpath.txt` 文件中添加下面一行:
```
-$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
+$matlabroot/java/jar/toolbox/taos-jdbcdriver-2.0.25-dist.jar
```
-- 在${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\下添加一个文件javalibrarypath.txt, 并在该文件中添加taos.dll的路径,比如您的taos.dll是在安装时拷贝到了C:\Windows\System32下,那么就应该在javalibrarypath.txt中添加如下一行:
-
+- 在 `${user_home}\AppData\Roaming\MathWorks\MATLAB\R2021a\` 下添加一个文件 `javalibrarypath.txt`,并在该文件中添加 taos.dll 的路径,比如您的 taos.dll 是在安装时拷贝到了 `C:\Windows\System32` 下,那么就应该在 `javalibrarypath.txt` 中添加如下一行:
```
C:\Windows\System32
```
-### 在MatLab中连接TDengine获取数据
+### 在 MATLAB 中连接 TDengine 获取数据
-在成功进行了上述配置后,打开MatLab。
+在成功进行了上述配置后,打开 MATLAB。
- 创建一个连接:
-
```matlab
-conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://127.0.0.1:0/’)
+conn = database('test', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://192.168.1.94:6030/')
```
- 执行一次查询:
-
```matlab
sql0 = ['select * from tb']
data = select(conn, sql0);
```
- 插入一条记录:
-
```matlab
sql1 = ['insert into tb values (now, 1)']
exec(conn, sql1)
```
-更多例子细节请参考安装包内examples\Matlab\TDengineDemo.m文件。
+更多例子细节请参考安装包内 `examples\Matlab\TDengineDemo.m` 文件。
## R
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index a430ce8277b49a3dbf7062fc078a47a3d848f8d8..db20ca4edb6513f70ebbf17969be1c20dccb6163 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -55,12 +55,11 @@ arbitrator ha.taosdata.com:6042
| 4 | statusInterval | dnode向mnode报告状态时长 |
| 5 | arbitrator | 系统中裁决器的end point |
| 6 | timezone | 时区 |
-| 7 | locale | 系统区位信息及编码格式 |
-| 8 | charset | 字符集编码 |
-| 9 | balance | 是否启动负载均衡 |
-| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
-| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
+| 7 | balance | 是否启动负载均衡 |
+| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
+| 9 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
+备注:在 2.0.19.0 及更早的版本中,除以上 9 项参数外,dnode 加入集群时,还会要求 locale 和 charset 参数的取值也一致。
## 启动第一个数据节点
@@ -86,7 +85,7 @@ taos>
将后续的数据节点添加到现有集群,具体有以下几步:
-1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;
+1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEp 参数配置为新集群首个节点的 End Point,在本例中是 h1.taos.com:6030)
2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令:
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index cc8689786d1725efdff82610190d4a6b1e34f906..b37916d790c5353db3a500173f4ef727a30c766d 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -99,9 +99,8 @@ taosd -C
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是工作的,一般无需设置。**注意:配置修改后,需要重启*taosd*服务才能生效。**
- firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。
-- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。
-- serverPort:taosd启动后,对外服务的端口号,默认值为6030。
-- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求, 默认值为6041。
+- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。这个参数值的长度需要控制在 96 个字符以内。
+- serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。)
- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
- logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。
- arbitrator:系统中裁决器的end point, 缺省值为空。
@@ -115,22 +114,24 @@ taosd -C
- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。
- ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。
-**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。
+**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
-不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数:
+不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数(既可以作为 create database 指令的参数,也可以写在 taos.cfg 配置文件中用来设定创建新数据库时所采用的默认值):
-- days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。
-- keep:数据库中数据保留的天数,单位为天,默认值:3650。(可通过 alter database 修改)
-- minRows:文件块中记录的最小条数,单位为条,默认值:100。
-- maxRows:文件块中记录的最大条数,单位为条,默认值:4096。
-- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
-- walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。
+- days:一个数据文件存储数据的时间跨度。单位为天,默认值:10。
+- keep:数据库中数据保留的天数。单位为天,默认值:3650。(可通过 alter database 修改)
+- minRows:文件块中记录的最小条数。单位为条,默认值:100。
+- maxRows:文件块中记录的最大条数。单位为条,默认值:4096。
+- comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
+- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。
-- cache:内存块的大小,单位为兆字节(MB),默认值:16。
+- cache:内存块的大小。单位为兆字节(MB),默认值:16。
- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改)
-- replica:副本个数,取值范围:1-3。单位为个,默认值:1。(可通过 alter database 修改)
-- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms。
-- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(可通过 alter database 修改)(从 2.0.11 版本开始支持此参数)
+- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改)
+- quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改)
+- precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
+- cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
+- update:是否允许更新。0:不允许;1:允许。默认值:0。
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
@@ -142,15 +143,18 @@ taosd -C
TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下:
-- numOfMnodes:系统中管理节点个数。默认值:3。
-- balance:是否启动负载均衡。0:否,1:是。默认值:1。
+- numOfMnodes:系统中管理节点个数。默认值:3。(2.0 版本从 2.0.20.11 开始、2.1 及以上版本从 2.1.6.0 开始,numOfMnodes 默认值改为 1。)
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
- arbitrator: 系统中裁决器的end point,缺省为空。
-- timezone、locale、charset 的配置见客户端配置。
+- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致)
+- balance:是否启用负载均衡。0:否,1:是。默认值:1。
+- flowctrl:是否启用非阻塞流控。0:否,1:是。默认值:1。
+- slaveQuery:是否启用 slave vnode 参与查询。0:否,1:是。默认值:1。
+- adjustMaster:是否启用 vnode master 负载均衡。0:否,1:是。默认值:1。
为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效:
@@ -414,6 +418,19 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。
+
+## 性能优化
+
+因数据行 [update](https://www.taosdata.com/cn/documentation/faq#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程:
+
+```mysql
+COMPACT VNODES IN (vg_id1, vg_id2, ...)
+```
+
+COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 1 时表示对应的 VGroup 正在进行碎片重整,为 0 时则表示并没有处于重整状态。
+
+需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。
+
## 文件目录结构
安装TDengine后,默认会在操作系统中生成下列目录或文件:
@@ -445,7 +462,7 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符
- 表的列名:不能包含特殊字符,不能超过 64 个字符
-- 数据库名、表名、列名,都不能以数字开头
+- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
- 表的列数:不能超过 1024 列
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
- 单条 SQL 语句默认最大字符串长度:65480 byte
@@ -461,43 +478,44 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
-| 关键字列表 | | | | |
-| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
-| ABORT | COPY | ID | MODULES | SLIMIT |
-| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
-| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
-| ADD | CTIME | IMMEDIATE | NONE | STABLE |
-| AFTER | DATABASE | IMPORT | NOT | STABLES |
-| ALL | DATABASES | IN | NOTNULL | STAR |
-| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
-| AND | DEFERRED | INSERT | OF | STDDEV |
-| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
-| ASC | DESC | INTEGER | OR | STREAMS |
-| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
-| AVG | DETACH | INTO | PASS | SUM |
-| BEFORE | DIFF | IP | PERCENTILE | TABLE |
-| BEGIN | DISTINCT | IS | PLUS | TABLES |
-| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
-| BIGINT | DNODE | JOIN | PREV | TAGS |
-| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
-| BITAND | DOT | KEY | QUERIES | TBNAME |
-| BITNOT | DOUBLE | KILL | QUERY | TIMES |
-| BITOR | DROP | LAST | RAISE | TIMESTAMP |
-| BOOL | EACH | LE | REM | TINYINT |
-| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
-| BY | EQ | LIKE | REPLICA | TRIGGER |
-| CACHE | EXISTS | LIMIT | RESET | UMINUS |
-| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
-| CHANGE | FAIL | LOCAL | ROW | USE |
-| CLOG | FILL | LP | ROWS | USER |
-| CLUSTER | FIRST | LSHIFT | RP | USERS |
-| COLON | FLOAT | LT | RSHIFT | USING |
-| COLUMN | FOR | MATCH | SCORES | VALUES |
-| COMMA | FROM | MAX | SELECT | VARIABLE |
-| COMP | GE | METRIC | SEMI | VGROUPS |
-| CONCAT | GLOB | METRICS | SET | VIEW |
-| CONFIGS | GRANTS | MIN | SHOW | WAVG |
-| CONFLICT | GROUP | MINUS | SLASH | WHERE |
-| CONNECTION | | | | |
+| 关键字列表 | | | | |
+| ------------ | ------------ | ------------ | ------------ | ------------ |
+| ABORT | CREATE | IGNORE | NULL | STAR |
+| ACCOUNT | CTIME | IMMEDIATE | OF | STATE |
+| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT |
+| ADD | DATABASES | IN | OR | STATE_WINDOW |
+| AFTER | DAYS | INITIALLY | ORDER | STORAGE |
+| ALL | DBS | INSERT | PARTITIONS | STREAM |
+| ALTER | DEFERRED | INSTEAD | PASS | STREAMS |
+| AND | DELIMITERS | INT | PLUS | STRING |
+| AS | DESC | INTEGER | PPS | SYNCDB |
+| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE |
+| ATTACH | DETACH | INTO | PREV | TABLES |
+| BEFORE | DISTINCT | IS | PRIVILEGE | TAG |
+| BEGIN | DIVIDE | ISNULL | QTIME | TAGS |
+| BETWEEN | DNODE | JOIN | QUERIES | TBNAME |
+| BIGINT | DNODES | KEEP | QUERY | TIMES |
+| BINARY | DOT | KEY | QUORUM | TIMESTAMP |
+| BITAND | DOUBLE | KILL | RAISE | TINYINT |
+| BITNOT | DROP | LE | REM | TOPIC |
+| BITOR | EACH | LIKE | REPLACE | TOPICS |
+| BLOCKS | END | LIMIT | REPLICA | TRIGGER |
+| BOOL | EQ | LINEAR | RESET | TSERIES |
+| BY | EXISTS | LOCAL | RESTRICT | UMINUS |
+| CACHE | EXPLAIN | LP | ROW | UNION |
+| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED |
+| CASCADE | FILE | LT | RSHIFT | UPDATE |
+| CHANGE | FILL | MATCH | SCORES | UPLUS |
+| CLUSTER | FLOAT | MAXROWS | SELECT | USE |
+| COLON | FOR | MINROWS | SEMI | USER |
+| COLUMN | FROM | MINUS | SESSION | USERS |
+| COMMA | FSYNC | MNODES | SET | USING |
+| COMP | GE | MODIFY | SHOW | VALUES |
+| COMPACT | GLOB | MODULES | SLASH | VARIABLE |
+| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES |
+| CONFLICT | GROUP | NE | SLIMIT | VGROUPS |
+| CONNECTION | GT | NONE | SMALLINT | VIEW |
+| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
+| CONNS | ID | NOTNULL | STABLE | WAL |
+| COPY | IF | NOW | STABLES | WHERE |
diff --git a/documentation20/cn/12.taos-sql/01.error-code/docs.md b/documentation20/cn/12.taos-sql/01.error-code/docs.md
index 95975dba5aeeeee9f42c3bf0b34f48095ea83fa3..867aa18715f87a1dfc9ea36203d32382bb726e30 100644
--- a/documentation20/cn/12.taos-sql/01.error-code/docs.md
+++ b/documentation20/cn/12.taos-sql/01.error-code/docs.md
@@ -1,172 +1,172 @@
# TDengine 2.0 错误码以及对应的十进制码
-| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
-|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
-|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
-|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
-|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
-|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
-|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
-|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
-|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
-|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
-|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
-|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
-|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
-|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
-|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
-|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
-|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
-|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
-|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
-|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
-|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
-|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
-|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
-|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
-|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
-|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
-|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
-|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
-|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
-|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
-|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
-|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
-|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
-|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
-|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
-|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
-|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
-|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
-|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
-|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
-|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
-|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
-|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
-|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
-|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
-|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
-|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
-|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
-|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
-|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
-|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
-|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
-|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
-|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
-|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
-|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
-|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
-|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
-|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
-|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
-|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
-|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
-|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
-|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
-|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
-|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
-|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
-|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
-|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
-|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
-|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
-|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
-|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
-|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
-|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
-|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
-|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
-|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
-|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
-|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
-|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
-|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
-|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
-|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
-|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
-|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
-|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
-|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
-|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
-|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
-|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
-|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
-|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
-|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
-|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
-|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
-|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
-|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
-|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
-|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
-|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
-|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
-|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
-|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
-|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
-|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
-|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
-|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
-|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
-|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
-|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
-|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
-|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
-|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
-|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
-|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
-|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
-|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
-|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
-|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
-|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
-|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
-|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
-|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
-|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
-|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
-|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
-|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
-|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
-|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
-|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
-|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
-|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
-|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
-|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
-|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
-|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
-|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
-|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
-|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
-|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
-|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
-|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
-|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
-|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
-|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
-|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
-|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
-|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
-|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
-|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
-|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
-|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
-|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
-|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
-|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
-|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
-|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
-|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
-|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
-|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
-|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
-|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
-|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
-|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
-|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
-|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
-|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
-|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
-|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|
\ No newline at end of file
+| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
+| :-------------------------------------- | :--: | :----------------: | :------------------------------------------- | :--------------- |
+| TSDB_CODE_RPC_ACTION_IN_PROGRESS | 0 | 0x0001 | "Action in progress" | -2147483647 |
+| TSDB_CODE_RPC_AUTH_REQUIRED | 0 | 0x0002 | "Authentication required" | -2147483646 |
+| TSDB_CODE_RPC_AUTH_FAILURE | 0 | 0x0003 | "Authentication failure" | -2147483645 |
+| TSDB_CODE_RPC_REDIRECT | 0 | 0x0004 | "Redirect" | -2147483644 |
+| TSDB_CODE_RPC_NOT_READY | 0 | 0x0005 | "System not ready" | -2147483643 |
+| TSDB_CODE_RPC_ALREADY_PROCESSED | 0 | 0x0006 | "Message already processed" | -2147483642 |
+| TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED | 0 | 0x0007 | "Last session not finished" | -2147483641 |
+| TSDB_CODE_RPC_MISMATCHED_LINK_ID | 0 | 0x0008 | "Mismatched meter id" | -2147483640 |
+| TSDB_CODE_RPC_TOO_SLOW | 0 | 0x0009 | "Processing of request timed out" | -2147483639 |
+| TSDB_CODE_RPC_MAX_SESSIONS | 0 | 0x000A | "Number of sessions reached limit" | -2147483638 |
+| TSDB_CODE_RPC_NETWORK_UNAVAIL | 0 | 0x000B | "Unable to establish connection" | -2147483637 |
+| TSDB_CODE_RPC_APP_ERROR | 0 | 0x000C | "Unexpected generic error in RPC" | -2147483636 |
+| TSDB_CODE_RPC_UNEXPECTED_RESPONSE | 0 | 0x000D | "Unexpected response" | -2147483635 |
+| TSDB_CODE_RPC_INVALID_VALUE | 0 | 0x000E | "Invalid value" | -2147483634 |
+| TSDB_CODE_RPC_INVALID_TRAN_ID | 0 | 0x000F | "Invalid transaction id" | -2147483633 |
+| TSDB_CODE_RPC_INVALID_SESSION_ID | 0 | 0x0010 | "Invalid session id" | -2147483632 |
+| TSDB_CODE_RPC_INVALID_MSG_TYPE | 0 | 0x0011 | "Invalid message type" | -2147483631 |
+| TSDB_CODE_RPC_INVALID_RESPONSE_TYPE | 0 | 0x0012 | "Invalid response type" | -2147483630 |
+| TSDB_CODE_RPC_INVALID_TIME_STAMP | 0 | 0x0013 | "Invalid timestamp" | -2147483629 |
+| TSDB_CODE_COM_OPS_NOT_SUPPORT | 0 | 0x0100 | "Operation not supported" | -2147483392 |
+| TSDB_CODE_COM_MEMORY_CORRUPTED | 0 | 0x0101 | "Memory corrupted" | -2147483391 |
+| TSDB_CODE_COM_OUT_OF_MEMORY | 0 | 0x0102 | "Out of memory" | -2147483390 |
+| TSDB_CODE_COM_INVALID_CFG_MSG | 0 | 0x0103 | "Invalid config message" | -2147483389 |
+| TSDB_CODE_COM_FILE_CORRUPTED | 0 | 0x0104 | "Data file corrupted" | -2147483388 |
+| TSDB_CODE_TSC_INVALID_OPERATION | 0 | 0x0200 | "Invalid SQL statement" | -2147483136 |
+| TSDB_CODE_TSC_INVALID_QHANDLE | 0 | 0x0201 | "Invalid qhandle" | -2147483135 |
+| TSDB_CODE_TSC_INVALID_TIME_STAMP | 0 | 0x0202 | "Invalid combination of client/service time" | -2147483134 |
+| TSDB_CODE_TSC_INVALID_VALUE | 0 | 0x0203 | "Invalid value in client" | -2147483133 |
+| TSDB_CODE_TSC_INVALID_VERSION | 0 | 0x0204 | "Invalid client version" | -2147483132 |
+| TSDB_CODE_TSC_INVALID_IE | 0 | 0x0205 | "Invalid client ie" | -2147483131 |
+| TSDB_CODE_TSC_INVALID_FQDN | 0 | 0x0206 | "Invalid host name" | -2147483130 |
+| TSDB_CODE_TSC_INVALID_USER_LENGTH | 0 | 0x0207 | "Invalid user name" | -2147483129 |
+| TSDB_CODE_TSC_INVALID_PASS_LENGTH | 0 | 0x0208 | "Invalid password" | -2147483128 |
+| TSDB_CODE_TSC_INVALID_DB_LENGTH | 0 | 0x0209 | "Database name too long" | -2147483127 |
+| TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH | 0 | 0x020A | "Table name too long" | -2147483126 |
+| TSDB_CODE_TSC_INVALID_CONNECTION | 0 | 0x020B | "Invalid connection" | -2147483125 |
+| TSDB_CODE_TSC_OUT_OF_MEMORY | 0 | 0x020C | "System out of memory" | -2147483124 |
+| TSDB_CODE_TSC_NO_DISKSPACE | 0 | 0x020D | "System out of disk space" | -2147483123 |
+| TSDB_CODE_TSC_QUERY_CACHE_ERASED | 0 | 0x020E | "Query cache erased" | -2147483122 |
+| TSDB_CODE_TSC_QUERY_CANCELLED | 0 | 0x020F | "Query terminated" | -2147483121 |
+| TSDB_CODE_TSC_SORTED_RES_TOO_MANY | 0 | 0x0210 | "Result set too large to be sorted" | -2147483120 |
+| TSDB_CODE_TSC_APP_ERROR | 0 | 0x0211 | "Application error" | -2147483119 |
+| TSDB_CODE_TSC_ACTION_IN_PROGRESS | 0 | 0x0212 | "Action in progress" | -2147483118 |
+| TSDB_CODE_TSC_DISCONNECTED | 0 | 0x0213 | "Disconnected from service" | -2147483117 |
+| TSDB_CODE_TSC_NO_WRITE_AUTH | 0 | 0x0214 | "No write permission" | -2147483116 |
+| TSDB_CODE_MND_MSG_NOT_PROCESSED | 0 | 0x0300 | "Message not processed" | -2147482880 |
+| TSDB_CODE_MND_ACTION_IN_PROGRESS | 0 | 0x0301 | "Message is progressing" | -2147482879 |
+| TSDB_CODE_MND_ACTION_NEED_REPROCESSED | 0 | 0x0302 | "Messag need to be reprocessed" | -2147482878 |
+| TSDB_CODE_MND_NO_RIGHTS | 0 | 0x0303 | "Insufficient privilege for operation" | -2147482877 |
+| TSDB_CODE_MND_APP_ERROR | 0 | 0x0304 | "Unexpected generic error in mnode" | -2147482876 |
+| TSDB_CODE_MND_INVALID_CONNECTION | 0 | 0x0305 | "Invalid message connection" | -2147482875 |
+| TSDB_CODE_MND_INVALID_MSG_VERSION | 0 | 0x0306 | "Incompatible protocol version" | -2147482874 |
+| TSDB_CODE_MND_INVALID_MSG_LEN | 0 | 0x0307 | "Invalid message length" | -2147482873 |
+| TSDB_CODE_MND_INVALID_MSG_TYPE | 0 | 0x0308 | "Invalid message type" | -2147482872 |
+| TSDB_CODE_MND_TOO_MANY_SHELL_CONNS | 0 | 0x0309 | "Too many connections" | -2147482871 |
+| TSDB_CODE_MND_OUT_OF_MEMORY | 0 | 0x030A | "Out of memory in mnode" | -2147482870 |
+| TSDB_CODE_MND_INVALID_SHOWOBJ | 0 | 0x030B | "Data expired" | -2147482869 |
+| TSDB_CODE_MND_INVALID_QUERY_ID | 0 | 0x030C | "Invalid query id" | -2147482868 |
+| TSDB_CODE_MND_INVALID_STREAM_ID | 0 | 0x030D | "Invalid stream id" | -2147482867 |
+| TSDB_CODE_MND_INVALID_CONN_ID | 0 | 0x030E | "Invalid connection id" | -2147482866 |
+| TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE | 0 | 0x0320 | "Object already there" | -2147482848 |
+| TSDB_CODE_MND_SDB_ERROR | 0 | 0x0321 | "Unexpected generic error in sdb" | -2147482847 |
+| TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE | 0 | 0x0322 | "Invalid table type" | -2147482846 |
+| TSDB_CODE_MND_SDB_OBJ_NOT_THERE | 0 | 0x0323 | "Object not there" | -2147482845 |
+| TSDB_CODE_MND_SDB_INVAID_META_ROW | 0 | 0x0324 | "Invalid meta row" | -2147482844 |
+| TSDB_CODE_MND_SDB_INVAID_KEY_TYPE | 0 | 0x0325 | "Invalid key type" | -2147482843 |
+| TSDB_CODE_MND_DNODE_ALREADY_EXIST | 0 | 0x0330 | "DNode already exists" | -2147482832 |
+| TSDB_CODE_MND_DNODE_NOT_EXIST | 0 | 0x0331 | "DNode does not exist" | -2147482831 |
+| TSDB_CODE_MND_VGROUP_NOT_EXIST | 0 | 0x0332 | "VGroup does not exist" | -2147482830 |
+| TSDB_CODE_MND_NO_REMOVE_MASTER | 0 | 0x0333 | "Master DNode cannot be removed" | -2147482829 |
+| TSDB_CODE_MND_NO_ENOUGH_DNODES | 0 | 0x0334 | "Out of DNodes" | -2147482828 |
+| TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT | 0 | 0x0335 | "Cluster cfg inconsistent" | -2147482827 |
+| TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION | 0 | 0x0336 | "Invalid dnode cfg option" | -2147482826 |
+| TSDB_CODE_MND_BALANCE_ENABLED | 0 | 0x0337 | "Balance already enabled" | -2147482825 |
+| TSDB_CODE_MND_VGROUP_NOT_IN_DNODE | 0 | 0x0338 | "Vgroup not in dnode" | -2147482824 |
+| TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE | 0 | 0x0339 | "Vgroup already in dnode" | -2147482823 |
+| TSDB_CODE_MND_DNODE_NOT_FREE | 0 | 0x033A | "Dnode not avaliable" | -2147482822 |
+| TSDB_CODE_MND_INVALID_CLUSTER_ID | 0 | 0x033B | "Cluster id not match" | -2147482821 |
+| TSDB_CODE_MND_NOT_READY | 0 | 0x033C | "Cluster not ready" | -2147482820 |
+| TSDB_CODE_MND_ACCT_ALREADY_EXIST | 0 | 0x0340 | "Account already exists" | -2147482816 |
+| TSDB_CODE_MND_INVALID_ACCT | 0 | 0x0341 | "Invalid account" | -2147482815 |
+| TSDB_CODE_MND_INVALID_ACCT_OPTION | 0 | 0x0342 | "Invalid account options" | -2147482814 |
+| TSDB_CODE_MND_USER_ALREADY_EXIST | 0 | 0x0350 | "User already exists" | -2147482800 |
+| TSDB_CODE_MND_INVALID_USER | 0 | 0x0351 | "Invalid user" | -2147482799 |
+| TSDB_CODE_MND_INVALID_USER_FORMAT | 0 | 0x0352 | "Invalid user format" | -2147482798 |
+| TSDB_CODE_MND_INVALID_PASS_FORMAT | 0 | 0x0353 | "Invalid password format" | -2147482797 |
+| TSDB_CODE_MND_NO_USER_FROM_CONN | 0 | 0x0354 | "Can not get user from conn" | -2147482796 |
+| TSDB_CODE_MND_TOO_MANY_USERS | 0 | 0x0355 | "Too many users" | -2147482795 |
+| TSDB_CODE_MND_TABLE_ALREADY_EXIST | 0 | 0x0360 | "Table already exists" | -2147482784 |
+| TSDB_CODE_MND_INVALID_TABLE_ID | 0 | 0x0361 | "Table name too long" | -2147482783 |
+| TSDB_CODE_MND_INVALID_TABLE_NAME | 0 | 0x0362 | "Table does not exist" | -2147482782 |
+| TSDB_CODE_MND_INVALID_TABLE_TYPE | 0 | 0x0363 | "Invalid table type in tsdb" | -2147482781 |
+| TSDB_CODE_MND_TOO_MANY_TAGS | 0 | 0x0364 | "Too many tags" | -2147482780 |
+| TSDB_CODE_MND_TOO_MANY_TIMESERIES | 0 | 0x0366 | "Too many time series" | -2147482778 |
+| TSDB_CODE_MND_NOT_SUPER_TABLE | 0 | 0x0367 | "Not super table" | -2147482777 |
+| TSDB_CODE_MND_COL_NAME_TOO_LONG | 0 | 0x0368 | "Tag name too long" | -2147482776 |
+| TSDB_CODE_MND_TAG_ALREAY_EXIST | 0 | 0x0369 | "Tag already exists" | -2147482775 |
+| TSDB_CODE_MND_TAG_NOT_EXIST | 0 | 0x036A | "Tag does not exist" | -2147482774 |
+| TSDB_CODE_MND_FIELD_ALREAY_EXIST | 0 | 0x036B | "Field already exists" | -2147482773 |
+| TSDB_CODE_MND_FIELD_NOT_EXIST | 0 | 0x036C | "Field does not exist" | -2147482772 |
+| TSDB_CODE_MND_INVALID_STABLE_NAME | 0 | 0x036D | "Super table does not exist" | -2147482771 |
+| TSDB_CODE_MND_DB_NOT_SELECTED | 0 | 0x0380 | "Database not specified or available" | -2147482752 |
+| TSDB_CODE_MND_DB_ALREADY_EXIST | 0 | 0x0381 | "Database already exists" | -2147482751 |
+| TSDB_CODE_MND_INVALID_DB_OPTION | 0 | 0x0382 | "Invalid database options" | -2147482750 |
+| TSDB_CODE_MND_INVALID_DB | 0 | 0x0383 | "Invalid database name" | -2147482749 |
+| TSDB_CODE_MND_MONITOR_DB_FORBIDDEN | 0 | 0x0384 | "Cannot delete monitor database" | -2147482748 |
+| TSDB_CODE_MND_TOO_MANY_DATABASES | 0 | 0x0385 | "Too many databases for account" | -2147482747 |
+| TSDB_CODE_MND_DB_IN_DROPPING | 0 | 0x0386 | "Database not available" | -2147482746 |
+| TSDB_CODE_DND_MSG_NOT_PROCESSED | 0 | 0x0400 | "Message not processed" | -2147482624 |
+| TSDB_CODE_DND_OUT_OF_MEMORY | 0 | 0x0401 | "Dnode out of memory" | -2147482623 |
+| TSDB_CODE_DND_NO_WRITE_ACCESS | 0 | 0x0402 | "No permission for disk files in dnode" | -2147482622 |
+| TSDB_CODE_DND_INVALID_MSG_LEN | 0 | 0x0403 | "Invalid message length" | -2147482621 |
+| TSDB_CODE_VND_ACTION_IN_PROGRESS | 0 | 0x0500 | "Action in progress" | -2147482368 |
+| TSDB_CODE_VND_MSG_NOT_PROCESSED | 0 | 0x0501 | "Message not processed" | -2147482367 |
+| TSDB_CODE_VND_ACTION_NEED_REPROCESSED | 0 | 0x0502 | "Action need to be reprocessed" | -2147482366 |
+| TSDB_CODE_VND_INVALID_VGROUP_ID | 0 | 0x0503 | "Invalid Vgroup ID" | -2147482365 |
+| TSDB_CODE_VND_INIT_FAILED | 0 | 0x0504 | "Vnode initialization failed" | -2147482364 |
+| TSDB_CODE_VND_NO_DISKSPACE | 0 | 0x0505 | "System out of disk space" | -2147482363 |
+| TSDB_CODE_VND_NO_DISK_PERMISSIONS | 0 | 0x0506 | "No write permission for disk files" | -2147482362 |
+| TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR | 0 | 0x0507 | "Missing data file" | -2147482361 |
+| TSDB_CODE_VND_OUT_OF_MEMORY | 0 | 0x0508 | "Out of memory" | -2147482360 |
+| TSDB_CODE_VND_APP_ERROR | 0 | 0x0509 | "Unexpected generic error in vnode" | -2147482359 |
+| TSDB_CODE_VND_INVALID_STATUS | 0 | 0x0510 | "Database not ready" | -2147482352 |
+| TSDB_CODE_VND_NOT_SYNCED | 0 | 0x0511 | "Database suspended" | -2147482351 |
+| TSDB_CODE_VND_NO_WRITE_AUTH | 0 | 0x0512 | "Write operation denied" | -2147482350 |
+| TSDB_CODE_TDB_INVALID_TABLE_ID | 0 | 0x0600 | "Invalid table ID" | -2147482112 |
+| TSDB_CODE_TDB_INVALID_TABLE_TYPE | 0 | 0x0601 | "Invalid table type" | -2147482111 |
+| TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION | 0 | 0x0602 | "Invalid table schema version" | -2147482110 |
+| TSDB_CODE_TDB_TABLE_ALREADY_EXIST | 0 | 0x0603 | "Table already exists" | -2147482109 |
+| TSDB_CODE_TDB_INVALID_CONFIG | 0 | 0x0604 | "Invalid configuration" | -2147482108 |
+| TSDB_CODE_TDB_INIT_FAILED | 0 | 0x0605 | "Tsdb init failed" | -2147482107 |
+| TSDB_CODE_TDB_NO_DISKSPACE | 0 | 0x0606 | "No diskspace for tsdb" | -2147482106 |
+| TSDB_CODE_TDB_NO_DISK_PERMISSIONS | 0 | 0x0607 | "No permission for disk files" | -2147482105 |
+| TSDB_CODE_TDB_FILE_CORRUPTED | 0 | 0x0608 | "Data file(s) corrupted" | -2147482104 |
+| TSDB_CODE_TDB_OUT_OF_MEMORY | 0 | 0x0609 | "Out of memory" | -2147482103 |
+| TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE | 0 | 0x060A | "Tag too old" | -2147482102 |
+| TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE | 0 | 0x060B | "Timestamp data out of range" | -2147482101 |
+| TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP | 0 | 0x060C | "Submit message is messed up" | -2147482100 |
+| TSDB_CODE_TDB_INVALID_ACTION | 0 | 0x060D | "Invalid operation" | -2147482099 |
+| TSDB_CODE_TDB_INVALID_CREATE_TB_MSG | 0 | 0x060E | "Invalid creation of table" | -2147482098 |
+| TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM | 0 | 0x060F | "No table data in memory skiplist" | -2147482097 |
+| TSDB_CODE_TDB_FILE_ALREADY_EXISTS | 0 | 0x0610 | "File already exists" | -2147482096 |
+| TSDB_CODE_TDB_TABLE_RECONFIGURE | 0 | 0x0611 | "Need to reconfigure table" | -2147482095 |
+| TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO | 0 | 0x0612 | "Invalid information to create table" | -2147482094 |
+| TSDB_CODE_QRY_INVALID_QHANDLE | 0 | 0x0700 | "Invalid handle" | -2147481856 |
+| TSDB_CODE_QRY_INVALID_MSG | 0 | 0x0701 | "Invalid message" | -2147481855 |
+| TSDB_CODE_QRY_NO_DISKSPACE | 0 | 0x0702 | "No diskspace for query" | -2147481854 |
+| TSDB_CODE_QRY_OUT_OF_MEMORY | 0 | 0x0703 | "System out of memory" | -2147481853 |
+| TSDB_CODE_QRY_APP_ERROR | 0 | 0x0704 | "Unexpected generic error in query" | -2147481852 |
+| TSDB_CODE_QRY_DUP_JOIN_KEY | 0 | 0x0705 | "Duplicated join key" | -2147481851 |
+| TSDB_CODE_QRY_EXCEED_TAGS_LIMIT | 0 | 0x0706 | "Tag conditon too many" | -2147481850 |
+| TSDB_CODE_QRY_NOT_READY | 0 | 0x0707 | "Query not ready" | -2147481849 |
+| TSDB_CODE_QRY_HAS_RSP | 0 | 0x0708 | "Query should response" | -2147481848 |
+| TSDB_CODE_GRANT_EXPIRED | 0 | 0x0800 | "License expired" | -2147481600 |
+| TSDB_CODE_GRANT_DNODE_LIMITED | 0 | 0x0801 | "DNode creation limited by licence" | -2147481599 |
+| TSDB_CODE_GRANT_ACCT_LIMITED | 0 | 0x0802 | "Account creation limited by license" | -2147481598 |
+| TSDB_CODE_GRANT_TIMESERIES_LIMITED | 0 | 0x0803 | "Table creation limited by license" | -2147481597 |
+| TSDB_CODE_GRANT_DB_LIMITED | 0 | 0x0804 | "DB creation limited by license" | -2147481596 |
+| TSDB_CODE_GRANT_USER_LIMITED | 0 | 0x0805 | "User creation limited by license" | -2147481595 |
+| TSDB_CODE_GRANT_CONN_LIMITED | 0 | 0x0806 | "Conn creation limited by license" | -2147481594 |
+| TSDB_CODE_GRANT_STREAM_LIMITED | 0 | 0x0807 | "Stream creation limited by license" | -2147481593 |
+| TSDB_CODE_GRANT_SPEED_LIMITED | 0 | 0x0808 | "Write speed limited by license" | -2147481592 |
+| TSDB_CODE_GRANT_STORAGE_LIMITED | 0 | 0x0809 | "Storage capacity limited by license" | -2147481591 |
+| TSDB_CODE_GRANT_QUERYTIME_LIMITED | 0 | 0x080A | "Query time limited by license" | -2147481590 |
+| TSDB_CODE_GRANT_CPU_LIMITED | 0 | 0x080B | "CPU cores limited by license" | -2147481589 |
+| TSDB_CODE_SYN_INVALID_CONFIG | 0 | 0x0900 | "Invalid Sync Configuration" | -2147481344 |
+| TSDB_CODE_SYN_NOT_ENABLED | 0 | 0x0901 | "Sync module not enabled" | -2147481343 |
+| TSDB_CODE_WAL_APP_ERROR | 0 | 0x1000 | "Unexpected generic error in wal" | -2147479552 |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 58191e0bd8faa02b0ff2381f1cdd576b379ae9fc..4368e5fa1dfef4300fe8e1c44b47471fb55f70e1 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -34,40 +34,41 @@ taos> DESCRIBE meters;
- 时间格式为 ```YYYY-MM-DD HH:mm:ss.MS```,默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128```
- 内部函数 now 是客户端的当前时间
- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
-- Epoch Time:时间戳也可以是一个长整数,表示从 1970-01-01 08:00:00.000 开始的毫秒数
+- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数)
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
-TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableMicrosecond 就可以支持微秒。
+TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒。
在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。
-| | 类型 | Bytes | 说明 |
+| # | **类型** | **Bytes** | **说明** |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) |
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制) |
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 6 | BINARY | 自定义 | 用于记录 ASCII 型字符串。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。 binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
+| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 |
| 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL |
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
| 9 | BOOL | 1 | 布尔型,{true, false} |
-| 10 | NCHAR | 自定义 | 用于记录非 ASCII 型字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
+
**Tips**:
1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
-2. 应避免使用 BINARY 类型来保存非 ASCII 型的字符串,会很容易导致数据乱码等错误。正确的做法是使用 NCHAR 类型来保存中文字符。
+2. **注意**,虽然 Binary 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 Binary 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 nchar 类型进行保存。如果强行使用 Binary 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
## 数据库管理
- **创建数据库**
```mysql
- CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
+ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
```
- 说明:
+ 说明:
- 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
+ 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
2) UPDATE 标志数据库支持更新相同时间戳数据;
@@ -75,7 +76,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
4) 一条SQL 语句的最大长度为65480个字符;
- 5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
+ 5) 数据库还有更多与存储相关的配置参数,请参见 [服务端配置](https://www.taosdata.com/cn/documentation/administrator#config) 章节。
- **显示系统当前参数**
@@ -88,13 +89,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
USE db_name;
```
- 使用/切换数据库
+ 使用/切换数据库(在 RESTful 连接方式下无效)。
- **删除数据库**
```mysql
DROP DATABASE [IF EXISTS] db_name;
```
- 删除数据库。所包含的全部数据表将被删除,谨慎使用
+ 删除数据库。指定 Database 所包含的全部数据表将被删除,谨慎使用!
- **修改数据库参数**
```mysql
@@ -125,9 +126,10 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
ALTER DATABASE db_name CACHELAST 0;
```
- CACHELAST 参数控制是否在内存中缓存数据子表的 last_row。缺省值为 0,取值范围 [0, 1]。其中 0 表示不启用、1 表示启用。(从 2.0.11 版本开始支持,修改后需要重启服务器生效。)
+ CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。)
+ 说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善不带特殊子句(WHERE、ORDER BY、GROUP BY、INTERVAL)时 LAST 函数的性能表现。
- **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。
+ **Tips**: 以上所有参数修改后都可以用 `SHOW DATABASES;` 来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。
- **显示系统所有数据库**
@@ -135,6 +137,14 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
SHOW DATABASES;
```
+- **显示一个数据库的创建语句**
+
+ ```mysql
+ SHOW CREATE DATABASE db_name;
+ ```
+ 常用于数据库迁移。对一个已经存在的数据库,返回其创建语句;在另一个集群中执行该语句,就能得到一个设置完全相同的 Database。
+
+
## 表管理
- **创建数据表**
@@ -159,22 +169,22 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
```
- 以指定的超级表为模板,指定 tags 的值来创建数据表。
+ 以指定的超级表为模板,指定 TAGS 的值来创建数据表。
-- **以超级表为模板创建数据表,并指定具体的 tags 列**
+- **以超级表为模板创建数据表,并指定具体的 TAGS 列**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
```
- 以指定的超级表为模板,指定一部分 tags 列的值来创建数据表。(没被指定的 tags 列会设为空值。)
- 说明:从 2.0.17 版本开始支持这种方式。在之前的版本中,不允许指定 tags 列,而必须显式给出所有 tags 列的取值。
+ 以指定的超级表为模板,指定一部分 TAGS 列的值来创建数据表(没被指定的 TAGS 列会设为空值)。
+ 说明:从 2.0.17.0 版本开始支持这种方式。在之前的版本中,不允许指定 TAGS 列,而必须显式给出所有 TAGS 列的取值。
- **批量创建数据表**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
- Create large numbers of tables in batch at higher speed. (Server version 2.0.14 and above)
+ Create large numbers of tables in batch at higher speed (server version 2.0.14 and above).
Notes:
@@ -198,13 +208,21 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
Note: wildcards can be used in like for name matching; the wildcard string must not exceed 24 bytes.
- Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches one character.
+ Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '\_' (underscore) matches a single arbitrary character.
+
+- **Show the creation statement of a table**
+
+ ```mysql
+ SHOW CREATE TABLE tb_name;
+ ```
+ Commonly used for database migration. For an existing table, this returns its creation statement; executing that statement in another cluster produces a table with an identical structure.
- **Modify the display character width online**
```mysql
SET MAX_BINARY_DISPLAY_WIDTH <nn>;
```
+ If displayed content ends with ..., it has been truncated; this command changes the display character width so the full content can be shown.
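+
+ For example (sketch; 120 is an arbitrary width), to widen the display to 120 characters:
+
+ ```mysql
+ SET MAX_BINARY_DISPLAY_WIDTH 120;
+ ```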
- **Get table structure information**
@@ -221,14 +239,22 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
1) The maximum number of columns is 1024, the minimum is 2;
- 2) The maximum length of a column name is 64;
+ 2) The maximum length of a column name is 64.
- **Drop a column from a table**
```mysql
ALTER TABLE tb_name DROP COLUMN field_name;
```
- If the table was created from a [super table](../super-table/), structure changes can only be made on the super table, and a change to the super table's structure applies to every table created from it. Tables not created from a super table can have their structure modified directly
+ If the table was created from a super table, structure changes can only be made on the super table, and a change to the super table's structure applies to every table created from it. Tables not created from a super table can have their structure modified directly.
+
+- **Modify the column width of a table**
+
+ ```mysql
+ ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length);
+ ```
+ If a data column's type is variable-length (BINARY or NCHAR), this command can modify its width (it can only be increased, never decreased). (Added in version 2.1.3.0)
+ If the table was created from a super table, structure changes can only be made on the super table, and a change to the super table's structure applies to every table created from it. Tables not created from a super table can have their structure modified directly.
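+
+ A quick sketch (using the tb1 table from the SQL examples below, whose col3 is BINARY(50)):
+
+ ```mysql
+ ALTER TABLE tb1 MODIFY COLUMN col3 BINARY(80);
+ ```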
## Super Table (STable) Management
@@ -239,15 +265,15 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
```
- Create an STable. The SQL syntax is similar to creating a table, but the names and types of the TAGS fields must be specified
+ Create an STable. The SQL syntax is similar to creating a table, but the names and types of the TAGS fields need to be specified
Notes:
- 1) TAGS columns cannot be of timestamp type;
+ 1) TAGS columns cannot be of timestamp type; (since version 2.1.3.0, timestamp-type TAGS columns are supported, but note that a literal value must be supplied when writing to a timestamp TAGS column, and arithmetic such as the expression `NOW + 10s` is not yet supported)
2) A TAGS column name cannot be the same as any other column name;
- 3) A TAGS column name cannot be a reserved keyword;
+ 3) A TAGS column name cannot be a reserved keyword (see: the [Parameter Restrictions and Reserved Keywords](https://www.taosdata.com/cn/documentation/administrator#keywords) section);
4) At most 128 TAGS are allowed, at least 1, with a total length not exceeding 16 KB.
@@ -261,10 +287,17 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
- **Show all super tables in the current database**
```mysql
- SHOW STABLES [LIKE tb_name_wildcar];
+ SHOW STABLES [LIKE tb_name_wildcard];
```
View all STables in the database and related information, including name, creation time, number of columns, number of tags (TAG), and the number of tables created from each STable.
+- **Show the creation statement of a super table**
+
+ ```mysql
+ SHOW CREATE STABLE stb_name;
+ ```
+ Commonly used for database migration. For an existing super table, this returns its creation statement; executing that statement in another cluster produces a super table with an identical structure.
+
- **Get super table structure information**
```mysql
@@ -283,6 +316,13 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
ALTER STABLE stb_name DROP COLUMN field_name;
```
+- **Modify the column width of a super table**
+
+ ```mysql
+ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length);
+ ```
+ If a data column's type is variable-length (BINARY or NCHAR), this command can modify its width (it can only be increased, never decreased). (Added in version 2.1.3.0)
+
## TAG Management in Super Tables (STable)
- **Add a tag**
@@ -306,6 +346,13 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
```
Rename a tag of a super table; after a tag is renamed on a super table, all child tables under it automatically pick up the new tag name.
+- **Modify the tag column width**
+
+ ```mysql
+ ALTER STABLE stb_name MODIFY TAG tag_name data_type(length);
+ ```
+ If a tag's type is variable-length (BINARY or NCHAR), this command can modify its width (it can only be increased, never decreased). (Added in version 2.1.3.0)
+
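+ For instance (sketch; the meters super table defined later in this section has a location BINARY(64) tag):
+
+ ```mysql
+ ALTER STABLE meters MODIFY TAG location BINARY(128);
+ ```
+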
- **Modify the tag value of a child table**
```mysql
@@ -315,62 +362,82 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableM
## Data Writing
-- **Insert a record**
+### Writing syntax:
+
+```mysql
+INSERT INTO
+ tb_name
+ [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
+ [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ [tb2_name
+ [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
+ [(field1_name, ...)]
+ VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
+ ...];
+```
+
+### Detailed description and examples:
+
+- **Insert one or more records**
+ Specify the name of an already-created data child table and supply one or more rows of data via the VALUES keyword to write them into the database. For example, the following statement writes one row:
```mysql
- INSERT INTO tb_name VALUES (field_value, ...);
+ INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
```
- Insert one record into table tb_name
-
-- **Insert a record with the data mapped to specified columns**
+ Alternatively, two rows can be written with a statement like this:
```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...);
+ INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
```
- Insert one record into table tb_name, mapping the data to the specified columns. Columns absent from the SQL statement are automatically filled with NULL by the database. The primary key (timestamp) cannot be NULL.
+ **Note:**
+ 1) In the second example, the two rows write their first-column timestamps in different formats. A string-format timestamp is unaffected by the time precision setting of its DATABASE, while a long-integer timestamp is affected by it: the timestamp in the example is written as 1626164208000 under millisecond precision, but would have to be written as 1626164208000000 under microsecond precision.
+ 2) When writing data in the multi-record form, the first-column timestamp must not be set to NOW for every row; otherwise the records in the statement would share one timestamp and might overwrite each other, so that not all rows are saved correctly. The reason is that the NOW function resolves to the actual execution time of the SQL statement, so every NOW marker in the same statement is replaced by exactly the same timestamp value.
+ 3) The oldest record timestamp allowed is the current server time minus the configured keep value (the number of days data is retained); the newest allowed is the current server time plus the configured days value (the time span of data stored per data file, in days). Both keep and days can be specified when creating the database; their defaults are 3650 days and 10 days respectively.
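+
+ To make note 2 concrete, a sketch of the problematic pattern (d1001 as above); both NOW markers resolve to the same timestamp, so the second row collides with the first:
+
+ ```mysql
+ INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32) (NOW, 10.3, 218, 0.25);
+ ```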
-- **Insert multiple records**
+- **Insert records with the data mapped to specified columns**
+ When inserting records into a data child table, whether one row or many, the data can be mapped to specified columns. Columns absent from the SQL statement are automatically filled with NULL by the database. The primary key (timestamp) cannot be NULL. For example:
```mysql
- INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31);
```
- Insert multiple records into table tb_name
- **Note**: when writing data in the multi-record form, the first-column timestamp must not be set to now for every row, otherwise the records in the statement would share one timestamp and might overwrite each other, so that not all rows are saved correctly.
+ **Note:** if no columns are specified, i.e. the all-columns form is used, the VALUES part must explicitly provide data for every column of the table. The all-columns form writes much faster than the specified-columns form, so it is recommended wherever possible; empty columns can then be filled with NULL.
-- **Insert multiple records into specified columns**
+- **Insert records into multiple tables**
+ A single statement can insert one or more records into several tables at once, and columns can also be specified along the way. For example:
```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
- Insert multiple records into the specified columns of table tb_name
-- **Insert multiple records into multiple tables**
+- **Create a table automatically when inserting**
+ If it is uncertain whether a table exists at write time, the auto-create-table syntax can create the missing table during the write; if the table already exists, no new table is created. Auto-creation requires a super table as the template and the TAGS values of the data table to be spelled out. For example:
```mysql
- INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
- tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
```
- Insert multiple records into tables tb1_name and tb2_name at the same time
-
-- **Insert multiple records into multiple tables by column**
+ It is also possible to specify only a subset of the TAGS columns during auto-creation; the unspecified TAGS columns are set to NULL. For example:
```mysql
- INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
- tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
```
- Insert multiple records into the specified columns of tables tb1_name and tb2_name at the same time
-
- Note: the oldest record timestamp allowed is the current server time minus the configured keep value (days of data retention); the newest allowed is the current server time plus the configured days value (time span of data per data file, in days). Both keep and days can be specified when creating the database; their defaults are 3650 days and 10 days.
-
-- **Create a table automatically when inserting**
+ The auto-create-table syntax also supports inserting records into multiple tables in one statement. For example:
```mysql
- INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+ d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
+ d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
- If it is uncertain whether a table exists at write time, the auto-create-table syntax can create the missing table during the write; if the table already exists, no new table is created. Auto-creation requires a super table as the template and the tags values of the data table to be spelled out.
+ **Note:** before version 2.0.20.5, when the auto-create-table syntax was used with specified columns, the child table's column list had to immediately follow the child table name and could not sit between TAGS and VALUES as in the example. From version 2.0.20.5 on, both forms are accepted, but they must not be mixed in one SQL statement, or a syntax error is reported.
-- **Create a table automatically when inserting, specifying a subset of tags columns**
+- **Insert data records from a file**
+ Besides inserting rows via the VALUES keyword, the data to be written can be placed in a CSV file (comma-separated, with each value wrapped in single quotes) for the SQL command to read. The CSV file needs no header line. For example, if the file /tmp/csvfile.csv contains:
+ ```
+ '2021-07-13 14:07:34.630', '10.2', '219', '0.32'
+ '2021-07-13 14:07:35.779', '10.15', '217', '0.33'
+ ```
+ then the following command writes the data in this file into the child table:
```mysql
- INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
```
- During auto-creation, values for only a subset of the tags columns can be specified; the unspecified tags columns are set to null.
**Writing historical records**: either the IMPORT or the INSERT command can be used; the syntax and function of IMPORT are exactly the same as INSERT.
-Note: for insert-type SQL statements, a streaming parse strategy is used, so the correct earlier part of the SQL is still executed before a later error is found. In the sql below, the insert statement is invalid, but d1001 is still created.
+**Note:** for insert-type SQL statements, we use a streaming parse strategy, so the correct earlier part of the SQL is still executed before a later error is found. In the SQL below, the INSERT statement is invalid, but d1001 is still created.
```mysql
taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
@@ -385,9 +452,9 @@ Query OK, 1 row(s) in set (0.001029s)
taos> SHOW TABLES;
Query OK, 0 row(s) in set (0.000946s)
-taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2);
+taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
-DB error: invalid SQL: keyword VALUES or FILE required
+DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)
taos> SHOW TABLES;
table_name | created_time | columns | stable_name |
@@ -404,21 +471,18 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
- [INTERVAL (interval_val [, interval_offset])]
- [SLIDING sliding_val]
- [FILL fill_val]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
+ [FILL(fill_mod_and_val)]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
- [SLIMIT limit_val [, SOFFSET offset_val]]
- [LIMIT limit_val [, OFFSET offset_val]]
+ [SLIMIT limit_val [SOFFSET offset_val]]
+ [LIMIT limit_val [OFFSET offset_val]]
[>> export_file];
```
-#### SELECT clause
-
-A select clause can be a union query (UNION) or a subquery (SUBQUERY) of another query.
-
-##### Wildcards
+#### Wildcards
The wildcard * stands for all columns. For regular tables, the result contains regular columns only.
```mysql
@@ -453,7 +517,7 @@ Query OK, 9 row(s) in set (0.002022s)
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```
-In Join queries, \* with a table prefix and \* without one return different results: \* returns all column data of all tables (excluding tags), whereas a prefixed wildcard returns only the column data of that table.
+In JOIN queries, \* with a table prefix and \* without one return different results: \* returns all column data of all tables (excluding tags), whereas a prefixed wildcard returns only the column data of that table.
```mysql
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
ts | current | voltage | phase | ts | current | voltage | phase |
@@ -469,8 +533,8 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```
-When SQL functions are used in a query, some SQL functions support wildcards. The difference:
-the ```count(\*)``` function returns a single column, while ```first```, ```last```, and ```last_row``` return all columns.
+When SQL functions are used in a query, some SQL functions support wildcards. The difference is:
+the ```count(*)``` function returns a single column, while ```first```, ```last```, and ```last_row``` return all columns.
```mysql
taos> SELECT COUNT(*) FROM d1001;
@@ -488,7 +552,7 @@ taos> SELECT FIRST(*) FROM d1001;
Query OK, 1 row(s) in set (0.000849s)
```
-##### Tag columns
+#### Tag columns
Since version 2.0.14, _tag columns_ can be specified in queries on regular tables, and the tag column values are returned together with the regular column data.
```mysql
@@ -504,12 +568,12 @@ Query OK, 2 row(s) in set (0.003112s)
##### Get the deduplicated values of a tag column
-Since version 2.0.15, the distinct keyword can be given when querying a tag column of a super table, returning all distinct values of that tag column.
+Since version 2.0.15, the DISTINCT keyword can be given when querying a tag column of a super table, returning all distinct values of that tag column.
```mysql
SELECT DISTINCT tag_name FROM stb_name;
```
-Note: at present the distinct keyword only deduplicates tag columns of super tables and cannot be used on regular columns.
+Note: at present the DISTINCT keyword only deduplicates tag columns of super tables and cannot be used on regular columns.
@@ -544,7 +608,7 @@ SELECT * FROM d1001;
#### Special functions
-Some special query functions can run without a FROM clause. Get the current database: database()
+Some special query functions can run without a FROM clause. Get the current database with database():
```mysql
taos> SELECT DATABASE();
database() |
@@ -552,7 +616,7 @@ taos> SELECT DATABASE();
power |
Query OK, 1 row(s) in set (0.000079s)
```
-If no default database was given at login and the ```use``` command has not been used to switch databases, NULL is returned.
+If no default database was given at login and the ```USE``` command has not been used to switch databases, NULL is returned.
```mysql
taos> SELECT DATABASE();
database() |
@@ -560,7 +624,7 @@ taos> SELECT DATABASE();
NULL |
Query OK, 1 row(s) in set (0.000184s)
```
-Get the server and client version numbers:
+Get the server and client version numbers:
```mysql
taos> SELECT CLIENT_VERSION();
client_version() |
@@ -604,7 +668,7 @@ SELECT TBNAME, location FROM meters;
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
-Both of the above queries support only tag (TAGS) filter conditions in the Where clause. For example:
+Both of the above queries support only tag (TAGS) filter conditions in the WHERE clause. For example:
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
@@ -622,64 +686,94 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
Query OK, 1 row(s) in set (0.001091s)
```
-- You can use * to return all columns, or specify column names. Arithmetic can be applied to numeric columns, and output columns can be given aliases
-- The WHERE clause can filter numeric values with various logical conditions, or filter strings with wildcards
+- You can use * to return all columns, or specify column names. Arithmetic can be applied to numeric columns, and output columns can be given aliases.
+ * Arithmetic expressions involving column names cannot yet be used in filter operators (e.g., `where a*2>6;` is not supported, but `where a>6/2;` works).
+ * Arithmetic expressions involving column names cannot yet be used as arguments of SQL functions (e.g., `select min(2*a) from t;` is not supported, but `select 2*min(a) from t;` works).
+- The WHERE clause can filter numeric values with various logical conditions, or filter strings with wildcards.
- By default the output is sorted by the first-column timestamp in ascending order, but descending order can be requested (_c0 denotes the first timestamp column). Using ORDER BY on any other field is illegal.
-- LIMIT controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET is applied to the result set after ORDER BY.
-- SLIMIT controls the number of output rows within each group created by GROUP BY.
-- Results can be exported to a specified file via ">>"
+- LIMIT controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET is applied to the result set after ORDER BY. `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`.
+ * With a GROUP BY clause, LIMIT controls the maximum number of rows output within each group.
+- SLIMIT controls at most how many of the groups created by GROUP BY are output, as sketched below. `SLIMIT 5 SOFFSET 2` can be abbreviated as `SLIMIT 2, 5`.
+- Results can be exported to a specified file via ">>".
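+
+As a sketch of the LIMIT/SLIMIT distinction (using the meters super table from the examples in this document), the following returns the average current for at most two location groups, whereas a LIMIT clause would instead cap the rows within each group:
+
+```mysql
+SELECT AVG(current), location FROM meters GROUP BY location SLIMIT 2;
+```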
### Supported filter operations
-| Operation | Note | Applicable Data Types |
-| ----------- | ----------------------------- | ------------------------------------- |
-| > | larger than | **`timestamp`** and all numeric types |
-| < | smaller than | **`timestamp`** and all numeric types |
-| >= | larger than or equal to | **`timestamp`** and all numeric types |
-| <= | smaller than or equal to | **`timestamp`** and all numeric types |
-| = | equal to | all types |
-| <> | not equal to | all types |
-| between and | within a certain range | **`timestamp`** and all numeric types |
-| % | match with any char sequences | **`binary`** **`nchar`** |
-| _ | match with a single char | **`binary`** **`nchar`** |
-
-1. To range-filter multiple fields at once, use the keyword AND to join the conditions; filter conditions joined by OR across different columns are not yet supported.
-2. For a single field, only one time filter condition per statement is supported; for other (regular) columns or tag columns, however, `OR` can combine filter conditions, e.g.: ((value > 20 AND value < 30) OR (value < 12)).
-3. Since version 2.0.17, filtering supports the BETWEEN AND syntax, e.g. `WHERE col2 BETWEEN 1.5 AND 3.25` means "1.5 ≤ col2 ≤ 3.25".
+| **Operation** | **Note** | **Applicable Data Types** |
+| --------------- | ----------------------------- | ----------------------------------------- |
+| > | larger than | **`timestamp`** and all numeric types |
+| < | smaller than | **`timestamp`** and all numeric types |
+| >= | larger than or equal to | **`timestamp`** and all numeric types |
+| <= | smaller than or equal to | **`timestamp`** and all numeric types |
+| = | equal to | all types |
+| <> | not equal to | all types |
+| between and | within a certain range | **`timestamp`** and all numeric types |
+| in | matches any value in a set | all types except first column `timestamp` |
+| % | match with any char sequences | **`binary`** **`nchar`** |
+| _ | match with a single char | **`binary`** **`nchar`** |
+
+1. The <> operator can also be written as != ; note that it cannot be applied to the first-column timestamp field of a table.
+2. To range-filter multiple fields at once, use the keyword AND to join the conditions; filter conditions joined by OR across different columns are not yet supported.
+3. For a single field, only one time filter condition per statement is supported; for other (regular) columns or tag columns, however, `OR` can combine filter conditions, e.g.: `((value > 20 AND value < 30) OR (value < 12))`.
+4. Since version 2.0.17.0, filtering supports the BETWEEN AND syntax, e.g. `WHERE col2 BETWEEN 1.5 AND 3.25` means "1.5 ≤ col2 ≤ 3.25".
+5. Since version 2.1.4.0, filtering supports the IN operator, e.g. `WHERE city IN ('Beijing', 'Shanghai')`. Notes: BOOL values may be written as `{true, false}` or `{0, 1}` but not as integers other than 0 and 1; FLOAT and DOUBLE are subject to floating-point precision, and a set value matches a row value only if they are equal within the precision limits; for TIMESTAMP, non-primary-key columns are supported.
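+
+For instance (sketch; the second tag value is illustrative), an IN filter on a tag column of the meters super table:
+
+```mysql
+SELECT TBNAME, location FROM meters WHERE location IN ('Beijing.Chaoyang', 'Beijing.Haidian');
+```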
+
+### The UNION ALL operator
+
+```mysql
+SELECT ...
+UNION ALL SELECT ...
+[UNION ALL SELECT ...]
+```
+
+TDengine supports the UNION ALL operator. That is, if several SELECT clauses return result sets with exactly the same structure (column names, column types, column count, and order), they can be merged with UNION ALL. Only the UNION ALL mode is supported at present, i.e. no deduplication takes place while the result sets are merged.
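+
+A minimal sketch (d1001 and d1002 as in the insert examples above):
+
+```mysql
+SELECT ts, current FROM d1001
+UNION ALL
+SELECT ts, current FROM d1002;
+```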
### SQL examples
-- For the examples below, table tb1 is created with the following statement
+- For the examples below, table tb1 is created with the following statement:
```mysql
CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
```
-- Query all records of tb1 from the past hour
+- Query all records of tb1 from the past hour:
```mysql
SELECT * FROM tb1 WHERE ts >= NOW - 1h;
```
-- Query records of tb1 in the time range 2018-06-01 08:00:00.000 to 2018-06-02 08:00:00.000 whose col3 string ends with 'nny', with the results sorted by timestamp in descending order
+- Query records of tb1 in the time range 2018-06-01 08:00:00.000 to 2018-06-02 08:00:00.000 whose col3 string ends with 'nny', with the results sorted by timestamp in descending order:
```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC;
```
-- Query the sum of col1 and col2, aliased as complex, where the time is later than 2018-06-01 08:00:00.000 and col2 is greater than 1.2, outputting only 10 records starting from the 5th
+- Query the sum of col1 and col2, aliased as complex, where the time is later than 2018-06-01 08:00:00.000 and col2 is greater than 1.2, outputting only 10 records starting from the 5th:
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
-- Query records from the past 10 minutes whose col2 is greater than 3.14, exporting the results to the file `/home/testoutpu.csv`.
+- Query records from the past 10 minutes whose col2 is greater than 3.14, exporting the results to the file `/home/testoutpu.csv`:
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```
-## SQL Functions
+
+## SQL Functions
### Aggregate functions
@@ -695,11 +789,11 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: all fields.
- Applies to: tables, super tables.
+ Applies to: **tables, super tables**.
Notes:
- 1) The asterisk * can replace a specific field; with the asterisk (*) the total number of records is returned.
+ 1) The asterisk (\*) can replace a specific field; with the asterisk (\*) the total number of records is returned.
2) For the same table, queries on different (non-NULL) fields all return the same result.
@@ -730,7 +824,7 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to timestamp, binary, nchar, or bool fields.
- Applies to: tables, super tables.
+ Applies to: **tables, super tables**.
Example:
```mysql
@@ -757,7 +851,23 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
- Applies to: tables.
+ Applies to: **tables, (super tables)**.
+
+ Note: since version 2.1.3.0, the TWA function can be used on super tables when GROUP BY splits the data into individual time lines (i.e., GROUP BY tbname).
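+
+ A sketch of that usage (the time range is illustrative):
+
+ ```mysql
+ SELECT TWA(current) FROM meters WHERE ts >= '2021-07-13 00:00:00.000' AND ts <= '2021-07-14 00:00:00.000' GROUP BY tbname;
+ ```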
+
+- **IRATE**
+ ```mysql
+ SELECT IRATE(field_name) FROM tb_name WHERE clause;
+ ```
+ Function: computes the instantaneous rate of change, using the last two samples within the time range; if those two values are decreasing, only the last value is used for the calculation instead of the difference between the two.
+
+ Return type: double-precision floating point (Double).
+
+ Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+
+ Applies to: **tables, (super tables)**.
+
+ Note: (new in version 2.1.3.0) IRATE can be used on super tables when GROUP BY splits the data into individual time lines (i.e., GROUP BY tbname).
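+
+ For instance (sketch; d1001 as in the earlier examples):
+
+ ```mysql
+ SELECT IRATE(current) FROM d1001 WHERE ts >= NOW - 1h;
+ ```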
- **SUM**
```mysql
@@ -769,7 +879,7 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
- Applies to: tables, super tables.
+ Applies to: **tables, super tables**.
Example:
```mysql
@@ -796,7 +906,7 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
- Applies to: tables. (Since version 2.0.15.1, this function also supports super tables)
+ Applies to: **tables**. (Since version 2.0.15.1, this function also supports **super tables**)
Example:
```mysql
@@ -819,7 +929,7 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Note: the independent variable is the timestamp and the dependent variable is the value of the column.
- Applies to: tables.
+ Applies to: **tables**.
Example:
```mysql
@@ -842,6 +952,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Example:
```mysql
taos> SELECT MIN(current), MIN(voltage) FROM meters;
@@ -867,6 +979,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Example:
```mysql
taos> SELECT MAX(current), MAX(voltage) FROM meters;
@@ -892,6 +1006,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: all fields.
+ Applies to: **tables, super tables**.
+
Notes:
1) To return the first (smallest-timestamp) non-NULL value of each column, use FIRST(\*);
@@ -925,6 +1041,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: all fields.
+ Applies to: **tables, super tables**.
+
Notes:
1) To return the last (largest-timestamp) non-NULL value of each column, use LAST(\*);
@@ -950,17 +1068,21 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
```mysql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
- Function: returns the largest *k* non-NULL values of a column in a table/super table. If several values tie for the maximum, the one with the smaller timestamp is returned.
+ Function: returns the largest *k* non-NULL values of a column in a table/super table. When several rows hold the same value and taking all of them would exceed the limit of k rows, the system randomly selects the required number from among the tied values.
Return type: same as the applied field.
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Notes:
1) *k* ranges over 1 ≤ *k* ≤ 100;
- 2) the system also returns the timestamp column associated with each record.
+ 2) the system also returns the timestamp column associated with each record;
+
+ 3) restriction: the TOP function does not support the FILL clause.
Example:
```mysql
@@ -984,17 +1106,21 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
```mysql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
- Function: returns the smallest *k* non-NULL values of a column in a table/super table. If several values tie for the minimum, the one with the smaller timestamp is returned.
+ Function: returns the smallest *k* non-NULL values of a column in a table/super table. When several rows hold the same value and taking all of them would exceed the limit of k rows, the system randomly selects the required number from among the tied values.
Return type: same as the applied field.
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Notes:
1) *k* ranges over 1 ≤ *k* ≤ 100;
- 2) the system also returns the timestamp column associated with each record.
+ 2) the system also returns the timestamp column associated with each record;
+
+ 3) restriction: the BOTTOM function does not support the FILL clause.
Example:
```mysql
@@ -1023,6 +1149,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables**.
+
Note: *P* ranges over 0 ≤ *P* ≤ 100; P = 0 is equivalent to MIN and P = 100 to MAX.
Example:
@@ -1038,12 +1166,14 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
```mysql
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
```
- Function: returns the percentile of a column's values in a table, similar to the PERCENTILE function but returning an approximate result.
+ Function: returns the percentile of a column's values in a table/super table, similar to the PERCENTILE function but returning an approximate result.
Return type: double-precision floating point (Double).
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Note: *P* ranges over 0 ≤ *P* ≤ 100; P = 0 is equivalent to MIN and P = 100 to MAX. The ```APERCENTILE``` function is recommended, as it performs far better than ```PERCENTILE```
```mysql
@@ -1058,13 +1188,17 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
```mysql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
- Function: returns the last record of a table (super table).
+ Function: returns the last record of a table/super table.
Return type: same as the applied field.
Applicable fields: all fields.
- Note: unlike the last function, last_row does not accept a time-range restriction and always returns the last record.
+ Applies to: **tables, super tables**.
+
+ Note: unlike the LAST function, LAST_ROW does not accept a time-range restriction and always returns the last record.
+
+ Restriction: LAST_ROW() cannot be used together with INTERVAL.
Example:
```mysql
@@ -1082,17 +1216,20 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
```
### Calculation functions
+
- **DIFF**
```mysql
SELECT DIFF(field_name) FROM tb_name [WHERE clause];
```
Function: computes the difference between a column's value and its value in the preceding row.
- Return type: same as the applied field.
+ Return type: same as the applied field.
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
- Note: the number of output rows is the total number of rows in the range minus one; the first row produces no output.
+ Applies to: **tables, (super tables)**.
+
+ Note: the number of output rows is the total number of rows in the range minus one; the first row produces no output. Since version 2.1.3.0, the DIFF function can be used on super tables when GROUP BY splits the data into individual time lines (i.e., GROUP BY tbname).
Example:
```mysql
@@ -1104,16 +1241,32 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Query OK, 2 row(s) in set (0.001162s)
```
+- **DERIVATIVE**
+ ```mysql
+ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+ ```
+ Function: computes the rate of change of a column's values per unit of time. The length of the unit time interval is given by the time_interval parameter, down to a minimum of 1 second (1s); the ignore_negative parameter may be 0 or 1, where 1 means negative values are ignored.
+
+ Return type: double-precision floating point.
+
+ Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+
+ Applies to: **tables, (super tables)**.
+
+ Note: (new in version 2.1.3.0) the number of output rows is the total number of rows in the range minus one, and the first row produces no output. DERIVATIVE can be used on super tables when GROUP BY splits the data into individual time lines (i.e., GROUP BY tbname).
+
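+ For instance (sketch; d1001 as in the earlier examples), the per-10-second rate of change of current, ignoring negative rates:
+
+ ```mysql
+ SELECT DERIVATIVE(current, 10s, 1) FROM d1001;
+ ```
+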
- **SPREAD**
```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
Function: computes the difference between the maximum and minimum values of a column in a table/super table.
- Return type: double-precision floating point.
+ Return type: double-precision floating point.
Applicable fields: cannot be applied to fields of binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Note: can also be applied to a TIMESTAMP field, in which case it denotes the time span covered by the records.
Example:
@@ -1142,6 +1295,8 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Applicable fields: cannot be applied to fields of timestamp, binary, nchar, or bool type.
+ Applies to: **tables, super tables**.
+
Notes:
1) calculations across two or more columns are supported, and parentheses can control the precedence;
@@ -1158,49 +1313,57 @@ TDengine supports aggregate queries on data. The supported aggregate and selection functions
Query OK, 3 row(s) in set (0.001046s)
```
-## Time-Dimension Aggregation
+## Window-Based Aggregation
-TDengine supports aggregation by time segment: data in a table can be cut into time segments and aggregated to produce results. For example, a temperature sensor samples once per second, but the average temperature over every 10 minutes is needed. Such aggregation is suitable for down sample operations, with the following syntax:
+TDengine supports aggregate queries over window divisions such as time segments. For example, a temperature sensor samples once per second, but the average temperature over every 10 minutes is needed. Such aggregations are suitable for down sample operations, with the following syntax:
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
- INTERVAL (interval [, offset])
- [SLIDING sliding]
- [FILL ({ VALUE | PREV | NULL | LINEAR})]
+ [SESSION(ts_col, tol_val)]
+ [STATE_WINDOW(col)]
+ [INTERVAL(interval [, offset]) [SLIDING sliding]]
+ [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
-- The length of the aggregation interval is given by the keyword INTERVAL, with a minimum of 10 milliseconds (10a), and an offset is supported (the offset must be smaller than the interval). In aggregate queries, the aggregate and selection functions that may run together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g.: top, bottom, diff, and arithmetic) cannot be used.
-- The WHERE clause can specify the start and end time of the query as well as other filter conditions
-- The SLIDING clause specifies the forward increment of the aggregation interval
-- The FILL clause specifies the fill mode when data is missing in a time range. The fill modes include:
- * No fill: NONE (default fill mode).
- * VALUE fill: fill with a fixed value, which must be specified. E.g.: fill(value, 1.23).
- * NULL fill: fill the data with NULL. E.g.: fill(null).
- * PREV fill: fill with the previous non-NULL value. E.g.: fill(prev).
+- In aggregate queries, the function_list position accepts aggregate and selection functions, each of which must produce a single output (e.g.: COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions with multi-row output (e.g.: TOP, BOTTOM, DIFF, and arithmetic) cannot be used.
+- Query filtering, aggregation, and similar operations are executed with each window as an independent unit. Aggregate queries currently support three ways of dividing windows (see the sketch after the notes below):
+ 1. Time window: the window width of the aggregation interval is given by the keyword INTERVAL, with a minimum of 10 milliseconds (10a); an offset is also supported (the offset must be smaller than the interval), namely the offset of the window division relative to "UTC moment 0". The SLIDING clause specifies the forward increment of the aggregation interval, i.e. how far the window slides forward each time. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
+ * Since version 2.1.5.0, the minimum interval allowed by INTERVAL is lowered to 1 microsecond (1u); naturally, if the queried DATABASE is set to millisecond precision, the minimum allowed interval is 1 millisecond (1a).
+ * **Note:** when the INTERVAL clause is used, except in very special cases, the timezone parameter in the taos.cfg configuration files of both client and server should be set to the same value, to avoid the severe performance hit of time-handling functions doing frequent cross-timezone conversions.
+ 2. State window: an integer or boolean value marks the state of the device when a record was produced. Records with the same state value belong to the same state window, and the window closes when the value changes. The column holding the state value is passed as the parameter of the STATE_WINDOW clause.
+ 3. Session window: the column holding the timestamps is given by the ts_col parameter of the SESSION clause. A session window uses the timestamp difference between adjacent records to decide whether they belong to the same session: if the difference is within tol_val, the records belong to the same window; once the gap exceeds tol_val, the next window opens automatically.
+- The WHERE clause can specify the start and end time of the query as well as other filter conditions.
+- The FILL clause specifies the fill mode when data is missing in a window. The fill modes include:
+ 1. No fill: NONE (default fill mode).
+ 2. VALUE fill: fill with a fixed value, which must be specified. E.g.: FILL(VALUE, 1.23).
+ 3. PREV fill: fill with the previous non-NULL value. E.g.: FILL(PREV).
+ 4. NULL fill: fill the data with NULL. E.g.: FILL(NULL).
+ 5. LINEAR fill: fill by linear interpolation between the closest non-NULL values before and after. E.g.: FILL(LINEAR).
+ 6. NEXT fill: fill with the next non-NULL value. E.g.: FILL(NEXT).
Notes:
- 1. FILL clauses can generate large amounts of filled output; be sure to specify the time range of the query. For each query, the system returns no more than 10 million interpolated results.
+ 1. FILL clauses can generate large amounts of filled output; be sure to specify the time range of the query. For each query, the system returns no more than 10 million interpolated results.
2. In time-dimension aggregation, the time series in the returned results is strictly monotonically increasing.
- 3. If the query target is a super table, the aggregate functions apply to the data of all tables under it that pass the value filters. Without a group by clause, the returned results are strictly monotonically increasing along the time series; with group by, the results within each group are not necessarily strictly monotonically increasing along the time series.
+ 3. If the query target is a super table, the aggregate functions apply to the data of all tables under it that pass the value filters. Without a GROUP BY clause, the returned results are strictly monotonically increasing along the time series; with GROUP BY, the results within each GROUP are not necessarily strictly monotonically increasing along the time series.
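+
+As a sketch of the state and session windows described above (`dev_tb` and its integer `status` column are hypothetical):
+
+```mysql
+SELECT COUNT(*), FIRST(ts), status FROM dev_tb STATE_WINDOW(status);
+SELECT COUNT(*), FIRST(ts) FROM dev_tb SESSION(ts, 30s);
+```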
Time aggregation is also often used in continuous query scenarios; see the documentation [Continuous Query](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query).
-**Example:** the smart meter table is created with the following statement:
+**Example**: the smart meter table is created with the following statement:
```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-For the data collected by smart meters, compute, in 10-minute stages, the average, the maximum, and the median of the current over the past 24 hours, together with the fitted line of the current trend over time. If a value is missing, fill with the previous non-NULL value.
-The query statement used is as follows:
+For the data collected by smart meters, compute, in 10-minute stages, the average, the maximum, and the median of the current over the past 24 hours, together with the fitted line of the current trend over time. If a value is missing, fill with the previous non-NULL value. The query statement used is as follows:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
@@ -1214,21 +1377,22 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- The maximum length of a database name is 32
- The maximum length of a table name is 192, and each data row is at most 16k characters (note: each BINARY/NCHAR column in a data row additionally takes 2 bytes of storage)
- The maximum length of a column name is 64; at most 1024 columns are allowed, at least 2, and the first column must be a timestamp
-- At most 128 tags are allowed, at least 1, and the total tag length must not exceed 16k characters
+- The maximum length of a tag name is 64; at most 128 tags are allowed, at least 1, and the total length of the tag values in a table must not exceed 16k characters
- The maximum length of a SQL statement is 65480 characters, but it can be raised via the system configuration parameter maxSQLLength, up to 1M
+- A SELECT statement may return at most 1024 columns (function calls in the statement may also take up some column slots); beyond that limit, explicitly request fewer output columns to avoid execution errors.
- The numbers of databases, super tables, and tables are not limited by the system, only by system resources
## Other Conventions of TAOS SQL
-**Restrictions on group by**
+**Restrictions on GROUP BY**
-TAOS SQL supports group by on tags and tbname, and also on regular columns, provided that only one column is used and that column has fewer than 100,000 unique values.
+TAOS SQL supports GROUP BY on tags and TBNAME, and also on regular columns, provided that only one column is used and that column has fewer than 100,000 unique values.
-**Restrictions on join operations**
+**Restrictions on JOIN operations**
TAOS SQL supports joining the columns of two tables on the primary-key timestamp; arithmetic on the aggregates of two tables is not yet supported.
-**Applicability of is not null and not-empty expressions**
+**Applicability of IS NOT NULL and not-empty expressions**
-is not null works on columns of all types. The not-empty expression is <>"" and applies only to columns of non-numeric types.
+IS NOT NULL works on columns of all types. The not-empty expression is <>"" and applies only to columns of non-numeric types.
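+
+A quick sketch on the tb1 table defined in the SQL examples above (col3 is a BINARY column):
+
+```mysql
+SELECT * FROM tb1 WHERE col3 IS NOT NULL;
+SELECT * FROM tb1 WHERE col3 <> "";
+```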
diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index c01247d345906bde021d88841d34f667c8991fa9..300ff27fe457fe50c78a4b5090ec20ea8edd8957 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -26,17 +26,17 @@
## 2. On Windows, JDBCDriver cannot find the dynamic link library. What should I do?
-Please see the [technical blog](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/) written for this question
+Please see the [technical blog](https://www.taosdata.com/blog/2019/12/03/950.html) written for this question
## 3. Creating a table prompts "more dnodes are needed"
-Please see the [technical blog](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/) written for this question
+Please see the [technical blog](https://www.taosdata.com/blog/2019/12/03/965.html) written for this question
## 4. How can TDengine generate a core file when it crashes?
-Please see the [technical blog](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/) written for this question
+Please see the [technical blog](https://www.taosdata.com/blog/2019/12/06/974.html) written for this question
-## 5. I get the error "Unable to establish connection". What should I do?
+## 5. I get the error "Unable to establish connection". What should I do?
When the client hits a connection failure, check the following:
@@ -51,13 +51,13 @@
4. Confirm that the client connects with the correct server FQDN (Fully Qualified Domain Name, obtainable by running the Linux command hostname -f on the server); for FQDN configuration see: [One article to clarify TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
-5. Ping the server FQDN; if there is no response, check your network, your DNS settings, or the system hosts file of the client machine
+5. Ping the server FQDN; if there is no response, check your network, your DNS settings, or the system hosts file of the client machine. If a TDengine cluster is deployed, the client must be able to ping the FQDN of every cluster node.
6. Check the firewall settings (ufw status on Ubuntu, firewall-cmd --list-port on CentOS) and confirm that TCP/UDP ports 6030-6042 are open
7. For JDBC on Linux (and similarly ODBC, Python, Go, etc.), make sure *libtaos.so* is in the directory */usr/local/taos/driver* and that */usr/local/taos/driver* is in the system library search path *LD_LIBRARY_PATH*
-8. For JDBC, ODBC, Python, Go, etc. on windows, make sure *C:\TDengine\driver\taos.dll* is in your system library search directory (placing *taos.dll* in the directory *C:\Windows\System32* is recommended)
+8. For JDBC, ODBC, Python, Go, etc. on Windows, make sure *C:\TDengine\driver\taos.dll* is in your system library search directory (placing *taos.dll* in the directory *C:\Windows\System32* is recommended)
9. If the connection failure still cannot be resolved
@@ -70,7 +70,8 @@
10. You can also use the network connectivity test built into the taos program to verify whether the specified port between server and client is open (both TCP and UDP): [TDengine Built-in Network Test Tool User Guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
-## 6. I get the error "Unexpected generic error in RPC" or "TDengine Error: Unable to resolve FQDN". What should I do?
+## 6. I get the error "Unexpected generic error in RPC" or "Unable to resolve FQDN". What should I do?
+
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS Shell or client applications, check the following:
1. Check that the FQDN of the server being connected to is correct; for FQDN configuration see: [One article to clarify TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
@@ -86,6 +87,7 @@
TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the system-monitoring database "log" for this purpose.
+
## 9. Can I delete or update a record?
TDengine does not currently support deletion; it may be supported in the future depending on user demand.
@@ -102,7 +104,7 @@ TDengine does not currently support deletion; it may be supported in the future
Batch insertion. Each write statement can insert multiple records into one table at once, or multiple records into multiple tables at once.
-## 12. What is the most effective way to write data? How do I fix garbled Chinese characters in nchar data inserted on Windows?
+## 12. How do I fix garbled Chinese characters in nchar data inserted on Windows?
On Windows, if nchar data to be inserted contains Chinese, first confirm that the system region is set to China (configurable in the Control Panel); the `taos` client in cmd should then work correctly. If you develop a Java application in an IDE such as Eclipse or IntelliJ, confirm that the file encoding in the IDE is set to GBK (Java's default encoding type), then initialize the client configuration when creating the Connection, with the following statements:
```JAVA
@@ -115,15 +117,15 @@ Connection = DriverManager.getConnection(url, properties);
## 13. JDBC reports the error: the excuted SQL is not a DML or a DDL?
Please update to the latest JDBC driver
-```JAVA
+```xml
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
-<version>2.0.4</version>
+<version>2.0.27</version>
```
-## 14. taos connect failed, reason: invalid timestamp
+## 14. taos connect failed, reason: invalid timestamp
A common cause is that the server and client clocks are not synchronized; this can be corrected by syncing with a time server (the ntpdate command on Linux; on Windows, choose automatic synchronization in the system time settings).
@@ -157,7 +159,8 @@ ALTER LOCAL RESETLOG;
It means: clear all log files generated by the client on this machine.
-## 18. How is the timezone information of timestamps handled?
+
+## 18. How is the timezone information of timestamps handled?
Timezones of timestamps in TDengine are always handled by the client and have nothing to do with the server. Specifically, the client converts timestamps in SQL statements to the UTC timezone (i.e. Unix timestamps) before handing them to the server for writing and querying; when reading, the server likewise supplies raw data in UTC, and the client converts the received timestamps to the timezone required by the local system before display.
@@ -166,3 +169,19 @@ Timezones of timestamps in TDengine are always handled by the client and have nothing to do with the server
2. If the timezone parameter is set in taos.cfg, the client follows the setting in that configuration file.
3. If a timezone is explicitly specified when establishing the database connection through a Connector Driver of C/C++/Java/Python and other languages, that specified timezone prevails. For example, the Java Connector's JDBC URL includes a timezone parameter.
4. When writing SQL statements, Unix timestamps (e.g. `1554984068000`) or timezone-qualified timestamp strings may also be used directly, i.e. in RFC 3339 format (e.g. `2013-04-12T15:52:01.123+08:00`) or ISO-8601 format (e.g. `2013-04-12T15:52:01.123+0800`); the values of such timestamps are then no longer affected by any other timezone settings.
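+
+A quick sketch (a hypothetical table `t` with a timestamp column and an int column): the timezone suffix pins the instant regardless of any client timezone setting:
+
+```mysql
+INSERT INTO t VALUES ('2013-04-12T15:52:01.123+08:00', 10);
+```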
+
+
+## 19. Which network ports does TDengine use?
+
+TDengine 2.0 uses the network ports below (described assuming the default port 6030; if the settings in the configuration file are changed, the ports listed here change accordingly). Administrators can adjust firewall settings with reference to this information:
+
+| Protocol | Default Port | Purpose | How to Modify |
+| :--- | :-------- | :---------------------------------- | :------------------------------- |
+| TCP | 6030 | Communication between client and server. | Set by the serverPort configuration option. |
+| TCP | 6035 | Inter-node communication in a multi-node cluster. | Follows the serverPort setting. |
+| TCP | 6040 | Inter-node data synchronization in a multi-node cluster. | Follows the serverPort setting. |
+| TCP | 6041 | RESTful communication between client and server. | Follows the serverPort setting. |
+| TCP | 6042 | Service port of the Arbitrator. | Changed via the Arbitrator startup parameters. |
+| TCP | 6060 | Network port of the Monitor service in the Enterprise Edition. | |
+| UDP | 6030-6034 | Communication between client and server. | Follows the serverPort setting. |
+| UDP | 6035-6039 | Inter-node communication in a multi-node cluster. | Follows the serverPort setting. |
diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..a10c22ee622fe71f4215c981774b637fc7c177d9
--- /dev/null
+++ b/documentation20/en/00.index/docs.md
@@ -0,0 +1,142 @@
+# TDengine Documentation
+
+TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
+
+## [TDengine Introduction](/evaluation)
+
+* [TDengine Introduction and Features](/evaluation#intro)
+* [TDengine Use Scenarios](/evaluation#scenes)
+* [TDengine Performance Metrics and Verification](/evaluation#)
+
+## [Getting Started](/getting-started)
+
+- [Quick Install](/getting-started#install): install via source code/package/Docker within seconds
+- [Easy to Launch](/getting-started#start): start/stop TDengine with systemctl
+- [Command-line](/getting-started#console): an easy way to access TDengine server
+- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
+- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
+- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in the Kubernetes environment
+
+## [Overall Architecture](/architecture)
+
+- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
+- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
+- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
+- [Data Writing and Replication Process](/architecture#replication): received records are written to WAL, cached, and acknowledged back to the client, with multi-replica support
+- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
+- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
+
+## [Data Modeling](/model)
+
+- [Create a Database](/model#create-db): create a database for all data collection points with similar features
+- [Create a Super Table (STable)](/model#create-stable): create a STable for all data collection points with the same type
+- [Create a Table](/model#create-table): use the STable as a template to create a table for each data collection point
+
+## [TAOS SQL](/taos-sql)
+
+- [Data Types](/taos-sql#data-type): support timestamp, int, float, nchar, bool, and other types
+- [Database Management](/taos-sql#management): add, drop, check databases
+- [Table Management](/taos-sql#table): add, drop, check, alter tables
+- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
+- [Tag Management](/taos-sql#tags): add, drop, alter tags
+- [Inserting Records](/taos-sql#insert): support to write single/multiple items per table, multiple items across tables, and support to write historical data
+- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
+- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
+- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
+- [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others
+- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes
+
+## [Efficient Data Ingestion](/insert)
+
+- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
+- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code
+- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code
+- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code
+- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
+
+## [Efficient Data Querying](/queries)
+
+- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment
+- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries
+- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation
+
+## [Advanced Features](/advanced-features)
+
+- [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals
+- [Data Publisher/Subscriber](/advanced-features#subscribe): subscribe to the newly arrived data like a typical messaging system
+- [Cache](/advanced-features#cache): the newly arrived data of each device/table will always be cached
+- [Alarm Monitoring](/advanced-features#alert): automatically monitor out-of-threshold data, and actively push it based-on configuration rules
+
+## [Connector](/connector)
+
+- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
+- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API
+- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
+- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
+- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
+- [Node.js Connector](/connector#nodejs): driver for connecting to TDengine server from Node.js applications
+- [C# Connector](/connector#csharp): driver for connecting to TDengine server from C# applications
+- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
+
+## [Connections with Other Tools](/connections)
+
+- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
+- [MATLAB](/connections#matlab): access data stored in TDengine server via JDBC configured within MATLAB
+- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
+- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
+
+## [Installation and Management of TDengine Cluster](/cluster)
+
+- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
+- [Create Your First Node](/cluster#node-one): simple to follow the quick setup
+- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
+- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
+- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas
+- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention
+- [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes
+- [Offline Node Processing](/cluster#offline): any node that offline for more than a certain period will be removed from the cluster
+- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain
+
+## [TDengine Operation and Maintenance](/administrator)
+
+- [Capacity Planning](/administrator#planning): Estimating hardware resources based on scenarios
+- [Fault Tolerance and Disaster Recovery](/administrator#tolerance): set the correct WAL and number of data replicas
+- [System Configuration](/administrator#config): port, cache size, file block size, and other system configurations
+- [User Management](/administrator#user): add/delete TDengine users, modify user password
+- [Import Data](/administrator#import): import data into TDengine from either script or CSV file
+- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
+- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
+- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
+- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
+
+## TDengine Technical Design
+
+- [System Module]: taosd functions and modules partitioning
+- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
+- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
+
+## Common Tools
+
+- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
+- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
+- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
+
+## Performance: TDengine vs Others
+
+- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
+- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
+- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
+- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
+- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
+
+## More on IoT Big Data
+
+- [Characteristics of IoT and Industry Internet Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
+- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
+- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
+- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
+
+## FAQ
+
+- [FAQ: Common questions and answers](/faq)
diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..250f465d7b1280a78e18250f95aefaeca0c95415
--- /dev/null
+++ b/documentation20/en/01.evaluation/docs.md
@@ -0,0 +1,65 @@
+# TDengine Introduction
+
+## About TDengine
+
+TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
+
+One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:
+
+- **Performance improvement over 10 times**: An innovative data storage structure is defined, with which a single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, more than 10 times faster than existing general-purpose databases.
+- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
+- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
+- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
+- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
+- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+
+With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
+
+
+
+Figure 1. TDengine Technology Ecosystem
+
+## Overall Scenarios of TDengine
+
+As an IoT Big Data platform, the typical application scenarios of TDengine are mainly presented in the IoT category, with users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
+
+### Characteristics and Requirements of Data Sources
+
+From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as follows.
+
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
+| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than other similar products. It can continuously process a large amount of input data in the same hardware environment, and provide a performance evaluation tool that can easily run in the user environment. |
+| A huge amount of data sources | | | √ | TDengine is designed to include optimizations specifically for a huge amount of data sources, such as data writing and querying, which is especially suitable for efficiently processing massive (tens of millions or more) data sources. |
+
+### System Architecture Requirements
+
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions, and no need to integrate any additional third-party products. |
+| Require fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability functions such as fault tolerance and disaster recovery. |
+| Standardization specifications | | | √ | TDengine uses standard SQL language to provide main functions and follow standardization specifications. |
+
+### System Function Requirements
+
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Require complete built-in data processing algorithms | | √ | | TDengine implements various general data processing algorithms, but may not cover all the requirements of every industry, so special types of processing must be handled at the application level. |
+| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and a relational database system should work together to implement the system functions. |
+
+### System Performance Requirements
+
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Require larger total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server cooperation. |
+| Require high-speed data processing | | | √ | TDengine’s storage and data processing are designed and optimized for IoT, and are generally several times faster than other similar products. |
+| Require fast processing of fine-grained data | | | √ | TDengine has achieved the same level of performance as relational and NoSQL data processing systems. |
+
+### System Maintenance Requirements
+
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| Require a system with high reliability | | | √ | TDengine has a very robust and reliable system architecture that makes daily operation simple and convenient, with streamlined experiences for operators, so human errors and accidents are eliminated to the greatest extent. |
+| Require controllable operation learning cost | | | √ | As above. |
+| Require abundant talent supply | √ | | | As a new-generation product, it is still difficult to find talent with TDengine experience in the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c9d9ac6af54cfd49a4b2700c8c79773f08a2120
--- /dev/null
+++ b/documentation20/en/02.getting-started/docs.md
@@ -0,0 +1,221 @@
+# Quick Start
+
+## Quick Install
+
+TDengine software consists of 3 parts: the server, the client, and the alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS, and more operating systems will come soon), while the client can be installed and run on either Windows or Linux. Applications on any OS can connect to the server taosd via a RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V, and more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+
+### Install from Source
+
+Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
+
+### Install from Docker Container
+
+Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
+
+### Install from Package
+
+It’s extremely easy to install TDengine, taking only a few seconds from download to successful installation. The server installation package includes the client and connectors. We provide 3 installation packages, which you can choose from according to your actual needs:
+
+Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
+
+For more about the installation process, please refer to [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html) and the [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
+
+## Quick Launch
+
+After installation, you can start the TDengine service with the `systemctl` command.
+
+```bash
+$ systemctl start taosd
+```
+
+Then check if the service is working now.
+
+```bash
+$ systemctl status taosd
+```
+
+If the service is running successfully, you can play around through TDengine shell `taos`.
+
+**Note:**
+
+- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
+- To get better product feedback and improve our solution, TDengine collects basic usage information; you can turn this off by setting the configuration parameter **telemetryReporting** in the system configuration file taos.cfg to 0.
+- TDengine uses the FQDN (usually the hostname) as the node ID. To ensure normal operation, you need to set the hostname of the server running taosd, and configure the DNS service or hosts file on the machine running the client application so that the FQDN can be resolved.
+- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager; use the `which systemctl` command to check whether `systemd` is present on the system:
+
+ ```bash
+ $ which systemctl
+ ```
+
+If `systemd` is not supported in the system, TDengine service can also be launched via `/usr/local/taos/bin/taosd` manually.
+
+## TDengine Shell Command Line
+
+To launch TDengine shell, the command line interface, in a Linux terminal, type:
+
+```bash
+$ taos
+```
+
+The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
+
+```cmd
+taos>
+```
+
+In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
+
+```mysql
+create database demo;
+use demo;
+create table t (ts timestamp, speed int);
+insert into t values ('2019-07-15 00:00:00', 10);
+insert into t values ('2019-07-15 01:00:00', 20);
+select * from t;
+          ts          | speed |
+===================================
+ 19-07-15 00:00:00.000|    10|
+ 19-07-15 01:00:00.000|    20|
+Query OK, 2 row(s) in set (0.001700s)
+```
+
+Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
+
+### Shell Command Line Parameters
+
+You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
+
+- -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
+- -h, --host: set the IP address of the server it will connect to. Default is localhost.
+- -s, --commands: set the command to run without entering the shell.
+- -u, --user: user name to connect to the server. Default is root.
+- -p, --password: password. Default is 'taosdata'.
+- -?, --help: get a full list of supported options.
+
+Examples:
+
+```bash
+$ taos -h 192.168.0.1 -s "use db; show tables;"
+```
+
+### Run SQL Command Scripts
+
+Inside TDengine shell, you can run SQL scripts in a file with the source command.
+
+```mysql
+taos> source <filepath>;
+```
+
+### Shell Tips
+
+- Use the up/down arrow keys to browse the command history
+- To change the default password, use the `alter user` command (see the example after these tips)
+- Use Ctrl+C to interrupt a running query
+- To clear the cached schema of local tables, execute the command `RESET QUERY CACHE`
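+
+For example, changing the password of the default root user might look like this (the new password value is a placeholder):
+
+```mysql
+taos> ALTER USER root PASS 'new_password';
+```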
+
+## Experience TDengine’s Lightning Speed
+
+After starting the TDengine server, you can execute the command `taosdemo` in a Linux terminal:
+
+```bash
+$ taosdemo
+```
+
+This command creates an STable named `meters` in the database `test`. Under this STable there are 10k tables, named `t0` to `t9999`. Each table holds 100k rows of records, and each row has three columns (`f1`, `f2` and `f3`). The timestamps range from "2017-07-14 10:40:00.000" to "2017-07-14 10:41:39.999". Each table also has the tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".
+
+It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will have been inserted.
+
+In the TDengine client, enter SQL queries and experience the lightning-fast query speed.
+
+- Query the total number of records:
+
+```mysql
+taos> select count(*) from test.meters;
+```
+
+- Query the average, max and min of the 1 billion records:
+
+```mysql
+taos> select avg(f1), max(f2), min(f3) from test.meters;
+```
+
+- Query the number of records where loc="beijing":
+
+```mysql
+taos> select count(*) from test.meters where loc="beijing";
+```
+
+- Query the average, max and min of the records where areaid=10:
+
+```mysql
+taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
+```
+
+- Query the average, max and min from table t10, aggregated over every 10 seconds:
+
+```mysql
+taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
+```
+
+**Note**: you can run the command `taosdemo` with many options, such as the number of tables and the number of records per table. To learn more about these options, execute `taosdemo --help` and experiment with different combinations.
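+
+As an illustration only, a smaller test run might look like the following; the flag meanings here are assumptions, so verify them against the output of `taosdemo --help` on your version:
+
+```bash
+# assumed flags: -t number of tables, -n records per table
+$ taosdemo -t 1000 -n 10000
+```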
+
+## Client and Alarm Module
+
+If your client and server run on different machines, please install the client separately. Linux and Windows packages are provided:
+
+- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
+- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
+- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
+
+The Linux package of the Alarm Module is as follows (please refer to [How to Use Alarm Module](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
+
+- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
+
+## List of Supported Platforms
+
+List of platforms supported by TDengine server
+
+| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
+| ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
+| X64 | ● | ● | | ○ | ● | ● |
+| Loongson MIPS64 | | | ● | | | |
+| Kunpeng ARM64 | | ○ | ○ | | ● | |
+| SWCPU Alpha64 | | | ○ | ● | | |
+| FT ARM64 | | ○Ubuntu Kylin | | | | |
+| Hygon X64 | ● | ● | ● | ○ | ● | ● |
+| Rockchip ARM64 | | | ○ | | | |
+| Allwinner ARM64 | | | ○ | | | |
+| Actions ARM64 | | | ○ | | | |
+
+Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
+
+List of platforms supported by TDengine client and connectors
+
+At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
+
+The comparison matrix is as follows:
+
+| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Shenwei** | **X64 TimecomTech** |
+| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
+| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
+| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
+| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
+| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
+| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
+| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
+| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
+
+Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
+
+Please visit the [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.
diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce8dd6c8be75ae87afcd51fbbecbaf97a274ba3e
--- /dev/null
+++ b/documentation20/en/03.architecture/docs.md
@@ -0,0 +1,434 @@
+# Data Model and Architecture
+
+## Data Model
+
+### A Typical IoT Scenario
+
+In typical IoT, Internet of Vehicles, and operation monitoring scenarios, there are often many different types of data collecting devices, each collecting one or more physical metrics. For collection devices of the same type, there are often many individual devices distributed across different places. A big data processing system aims to collect all this data, then compute and analyze it. For the same kind of device, the collected data is very regular. Taking smart meters as an example, and assuming that each smart meter collects the three metrics current, voltage and phase, the collected data resembles the following table:
+
+| Device ID | Time Stamp    | current | voltage | phase | location         | groupId |
+| --------- | ------------- | ------- | ------- | ----- | ---------------- | ------- |
+| d1001     | 1538548685000 | 10.3    | 219     | 0.31  | Beijing.Chaoyang | 2       |
+| d1002     | 1538548684000 | 10.2    | 220     | 0.23  | Beijing.Chaoyang | 3       |
+| d1003     | 1538548686500 | 11.5    | 221     | 0.35  | Beijing.Haidian  | 3       |
+| d1004     | 1538548685500 | 13.4    | 223     | 0.29  | Beijing.Haidian  | 2       |
+| d1001     | 1538548695000 | 12.6    | 218     | 0.33  | Beijing.Chaoyang | 2       |
+| d1004     | 1538548696600 | 11.8    | 221     | 0.28  | Beijing.Haidian  | 2       |
+| d1002     | 1538548696650 | 10.3    | 218     | 0.25  | Beijing.Chaoyang | 3       |
+| d1001     | 1538548696800 | 12.3    | 221     | 0.31  | Beijing.Chaoyang | 2       |
+
+Table 1: Smart meter example data (current, voltage and phase are the collected metrics; location and groupId are the tags)
+
+Each data record contains the device ID, a timestamp, the collected metrics (current, voltage and phase as above), and the static tags (location and groupId in Table 1) associated with the device. Each device generates a data record on a pre-defined timer or when triggered by an external event. It is a sequence of data points, like a stream.
+
+### Data Characteristics
+
+As the data points form a series over time, the data generated by IoT, Internet of Vehicles, and operation monitoring scenarios shares some strong common characteristics:
+
+1. Metrics are always structured data;
+2. There are rarely delete/update operations on collected data;
+3. Unlike traditional databases, transactions are not needed;
+4. Compared with typical Internet applications, the write ratio is high while the read ratio is low;
+5. The data flow rate is steady and can be predicted from the number of devices and the collection frequency;
+6. Users care about the trend of the data, not a specific value at a specific time;
+7. There is always a data retention policy;
+8. Queries are always executed within a given time range and over a subset of devices;
+9. In addition to storage and query operations, various statistical and real-time calculations are also required;
+10. The data volume is huge: a system may generate over 10 billion data points in a day.
+
+By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
+
+### Relational Database Model
+
+Since time-series data is most likely structured, TDengine adopts the traditional relational database model to process it, which gives a shallow learning curve. You create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL's key-value storage.
+
+### One Table for One Collection Point
+
+To utilize these time-series and other data features, TDengine requires the user to create a table for each data collection point to store its collected time-series data. For example, if there are over 10 million smart meters, 10 million tables shall be created. For the table above, 4 tables shall be created for the devices d1001, d1002, d1003 and d1004. This design has several advantages:
+
+1. It guarantees that all data from a collection point is saved block by block in a continuous space in memory or on hard disk. If queries are applied to only one point over a time range, this design reduces the random read latency significantly, thus increasing read and query speed by orders of magnitude.
+2. Since the data generation process of each collection device is completely independent, each device has its own unique data source, so writes can be carried out in a lock-free manner, greatly improving write speed.
+3. Write latency is significantly reduced too: the data points generated by the same device arrive in time order, so a new data point is simply appended to a block.
+
+If the data of multiple devices were written into one table in the traditional way, then, due to uncontrollable network delays, the order in which data from different devices arrives at the server could not be guaranteed, the write operations would have to be protected by locks, and the data of one device could not be guaranteed to be stored together continuously. **The method of one table per data collection point ensures, to the greatest possible extent, optimal insert and query performance for a single data collection point.**
+
+TDengine suggests using the collection point ID as the table name (like d1001 in the above table). Each point may collect one or more metrics (like the current, voltage and phase above). Each metric has a column in the table, and the data type of a column can be int, float, string, and so on. In addition, the first column of the table must be a timestamp. TDengine uses the timestamp as the index and does not build indexes on any stored metrics. All data is stored in columns.
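+
+A minimal sketch of this model, with the column names taken from Table 1:
+
+```mysql
+-- one table per collection point; the first column must be the timestamp
+CREATE TABLE d1001 (ts timestamp, current float, voltage int, phase float);
+```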
+
+### STable: A Collection of Data Points in the Same Type
+
+The method of one table per collection point leads to a rapidly growing number of tables, which are difficult to manage. Moreover, applications often need to aggregate data across collection points, which makes aggregation operations complicated. To support efficient aggregation over multiple tables, TDengine introduces the [STable (Super Table)](https://www.taosdata.com/en/documentation/super-table) concept.
+
+A STable is an abstract collection for one type of data collection point. It contains a set of points (tables) that have the same schema or data structure but different static attributes (tags). To describe a STable (a set of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is necessary to define the schema of its tags. The data types of tags can be int, float, string, and so on; there can be multiple tags, and they can be added, deleted or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
+
+In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
+
+When aggregating multiple data collection points of the same type, TDengine first finds, from the STable, the tables that satisfy the tag filters, and then scans only the time-series data of those tables to perform the aggregation. This greatly reduces the data sets to be scanned and thus greatly improves aggregation performance.
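+
+As a sketch, the STable definition below also appears in the Data Modeling section of this documentation; the aggregation query is an illustrative tag-filter example:
+
+```mysql
+-- one STable per device type, with a tag schema
+CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
+-- aggregate across all collection points that match a tag filter
+SELECT avg(current), max(voltage) FROM meters WHERE location = 'Beijing.Chaoyang';
+```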
+
+## Cluster and Primary Logic Unit
+
+The design of TDengine is based on the assumption that any single hardware or software system is unreliable, and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed as a distributed, high-reliability architecture since day one of R&D, supporting scale-out, so that hardware or software failure of any single server, or of multiple servers, will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load balancing, TDengine makes the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment.
+
+### Primary Logic Unit
+
+The logical structure diagram of TDengine's distributed architecture is as follows:
+
+
+ Picture 1: TDengine architecture diagram
+
+
+
+A complete TDengine system runs on one or more physical nodes. Logically, it consists of data nodes (dnode), the TDengine application driver (taosc) and applications (app). There are one or more data nodes in the system, and they form a cluster. The application interacts with the TDengine cluster through taosc's API. Below is a brief introduction to each logical unit.
+
+**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
+
+**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (vnode) and zero or at most one logical management node (mnode). A dnode is uniquely identified in the system by its End Point (EP), which combines the FQDN of the physical node where the dnode is located with the network port number configured for the instance. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances, that is, host multiple data nodes.
+
+**Virtual node (vnode)**: To better support data sharding and load balancing, and to prevent data hot spots or skew, data nodes are virtualized into multiple virtual nodes (vnode; V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, the basic unit of time-series data storage, with its own running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacity of the physical node where it is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schemas and tag values of the tables it contains. A virtual node is uniquely identified in the system by the EP of its data node and the VGroup ID it belongs to, and it is created and managed by the management node.
+
+**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes. The management node is also responsible for storing and managing metadata (including users, databases, tables, static tags, etc.), so it is also called the Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). A master/slave mechanism is used among the mnodes, and their data is synchronized in a strongly consistent way; any data update operation can only be done on the master. The creation of the mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, and it is uniquely identified by the EP of the dnode it belongs to. Through internal message exchange, every dnode automatically learns the EPs of all dnodes in the cluster that host mnodes.
+
+**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure high reliability of the system. The virtual node group is managed in a master/slave fashion. Write operations can only be performed on the master vnode, and the system synchronizes the data to the slave vnodes via replication, thus keeping a copy of the data on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If a DB has N replicas, the system must have at least N data nodes. The number of replicas can be specified with the parameter replica when creating the DB, and the default is 1. Using TDengine's multi-replica feature, the same high data reliability can be achieved without expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, which assigns it a cluster-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, they belong to the same group and their data is replicated to each other. The number of virtual nodes in a virtual node group can be changed dynamically; setting it to one means no data replication. The VGroup ID never changes; even if a virtual node group is deleted, its ID is not reused.
+
+**TAOSC**: taosc is the driver provided by TDengine to applications. It handles the interaction between applications and the cluster, and provides the native C/C++ interface, which is embedded in the JDBC, C#, Python, Go and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of connecting directly to the data nodes in the cluster. This module obtains and caches metadata; forwards insert, query and other requests to the correct data node; and, when returning results to the application, performs the final stage of aggregation, sorting, filtering and other operations. For the JDBC, C/C++, C#, Python, Go and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.
+
+### Node Communication
+
+**Communication mode**: Communication among the data nodes of a TDengine system, and between the application driver and the data nodes, is carried out over TCP/UDP. Considering IoT scenarios, where data-writing packets are generally not large, TDengine uses UDP in addition to TCP, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission and confirmation mechanisms to ensure reliable transmission over UDP. For packets smaller than 15 KB, UDP is used; for packets larger than 15 KB, or for query operations, TCP is used automatically. At the same time, TDengine automatically compresses/decompresses and signs/authenticates the data, depending on the configuration and the packet. For data replication among data nodes, only TCP is used.
+
+**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system automatically uses the hostname of the computer as its FQDN. If the node has no FQDN configured, you can set the fqdn parameter directly to its IP address; however, this is not recommended, because an IP address can change, and once it changes, the cluster stops working properly. The EP (End Point) of a data node consists of FQDN + Port. When FQDNs are used, the DNS service must work properly, or hosts files must be configured on the data nodes and on the nodes where the applications run.
+
+**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal cluster communication is serverPort+5. Data replication among data nodes in the cluster occupies one more TCP port, serverPort+10. To support multithreading and efficient processing of UDP data, each internal and external UDP connection occupies 5 consecutive ports. Therefore, the total port range of a data node is serverPort to serverPort+10, 11 TCP/UDP ports in total. Make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
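+
+For illustration, assuming the default serverPort of 6030, the port layout described above works out as follows:
+
+```
+# taos.cfg, illustrative excerpt
+serverPort 6030
+# internal cluster communication: 6035 (serverPort+5)
+# data replication among nodes:   6040 (serverPort+10)
+# the firewall must keep 6030-6040 open on every data node
+```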
+
+**Cluster external connection**: A TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any one data node in the cluster. The network parameter required for the connection is the End Point (FQDN plus configured port number) of a data node. When starting the CLI application taos, the FQDN of the data node can be specified with the option -h, and the configured port number with -P. If the port is not given, the TDengine system configuration parameter serverPort is used.
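+
+For example (the hostname below is a placeholder for any data node in your cluster):
+
+```bash
+$ taos -h h1.taosdata.com -P 6030
+```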
+
+**Inter-cluster communication**: Data nodes connect to each other through TCP/UDP. When a data node starts, it obtains the EPs of the dnodes where the mnodes are located, and then establishes connections with the mnodes to exchange information. The mnode EP list is obtained in three steps: 1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2. 2. Check the system configuration file taos.cfg for the parameters firstEp and secondEp (the nodes specified by these two parameters can be ordinary nodes without an mnode; in that case, the node will be redirected to an mnode when connecting). If these two parameters are missing from taos.cfg or invalid, go to step 3. 3. Set its own EP as an mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection. It joins the working cluster once the connection succeeds; if not, it tries the next item in the mnode EP list. If all attempts fail, it sleeps for a few seconds before trying again.
+
+**The choice of mnode**: TDengine logically has a management node, but there is no separate executable for it; the server side has only one executable, taosd. So which data node becomes the management node? This is determined automatically by the system, without any manual intervention. The principle is as follows: when a data node starts, it checks its own End Point against the obtained mnode EP list. If its EP is in the list, the data node starts the mnode module and becomes an mnode; otherwise, the mnode module is not started. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. The modification of configuration parameters is decided by the mnode itself according to resource usage.
+
+**Adding new data nodes:** Once the system has one data node, it is already a working system. Two steps are needed to add a new node into the cluster. Step 1: connect to an existing working data node using the TDengine CLI, and add the End Point of the new data node with the command "create dnode". Step 2: in the system configuration file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EPs of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster is established step by step.
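+
+A sketch of the two steps, with placeholder hostnames:
+
+```mysql
+-- Step 1: on an existing node, register the new node's End Point
+taos> CREATE DNODE "h2.taosdata.com:6030";
+```
+
+Step 2 then happens on the new node itself: in its taos.cfg, point firstEp (and optionally secondEp) at existing nodes, e.g. `firstEp h1.taosdata.com:6030`.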
+
+**Redirection**: Whether it is a dnode or taosc, the connection to the mnode is initiated first. However, the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Since every running dnode maintains the currently running mnode EP list, when it receives a connection request from a newly started dnode or taosc and it is not an mnode itself, it replies with the mnode EP list. After receiving this list, taosc or the newly started dnode tries to establish the connection again. When the mnode EP list changes, each data node quickly obtains the latest list through inter-node messaging and notifies taosc.
+
+### A Typical Messaging Process
+
+To explain the relationship between vnode, mnode, taosc and the application, and their respective roles, the following analyzes a typical data writing process.
+
+
+ Picture 2: A typical data writing process in TDengine
+
+1. The application initiates a request to insert data through JDBC, ODBC, or another API.
+2. taosc checks its cache to see whether it has the metadata of the table. If so, go straight to Step 4. If not, taosc sends a get-metadata request to the mnode.
+3. The mnode returns the metadata of the table to taosc. The metadata contains the schema of the table, and also the vgroup information to which the table belongs (the vnode IDs and the End Points of the dnodes; if the number of replicas is N, there will be N groups of End Points). If taosc does not receive a response from the mnode for a long time and there are multiple mnodes, taosc will send the request to the next mnode.
+4. taosc initiates an insert request to the master vnode.
+5. After the vnode inserts the data, it replies to taosc to indicate that the insertion succeeded. If taosc gets no response from the vnode for a long time, taosc treats the node as offline. In this case, if the inserted database has multiple replicas, taosc issues the insert request to the next vnode in the vgroup.
+6. taosc notifies the application that the write succeeded.
+
+For Steps 2 and 3: when taosc starts, it does not know the End Point of the mnode, so it directly initiates the request to the configured externally serving End Point of the cluster. If the dnode that receives the request does not host an mnode, it replies with the mnode EP list, so that taosc re-issues the get-metadata request to the EP of an mnode.
+
+For Steps 4 and 5: without cached information, taosc cannot tell which vnode in the virtual node group is the master, so it assumes the first vnode is the master and sends the request to it. If that vnode is not the master, it replies with the identity of the actual master, and taosc re-sends the request there. Once a reply of successful insertion is received, taosc caches the master node's information.
+
+The above is the process of inserting data; the processes of querying and computing are exactly the same. taosc encapsulates and hides all these complicated processes, which are invisible to applications and require no special handling.
+
+Through the taosc caching mechanism, the mnode needs to be accessed only the first time a table is operated on, so the mnode does not become a system bottleneck. However, because schemas and vgroups may change (for example, due to load balancing), taosc interacts with the mnode regularly to automatically update its cache.
+
+## Storage Model and Data Partitioning/Sharding
+
+### Storage Model
+
+The data stored by TDengine includes the collected time-series data, metadata related to databases and tables, tag data, etc. These data are divided into three parts:
+
+- Time-series data: stored in vnodes and composed of data, head and last files. The amount of data is large, and the query volume depends on the application scenario. Out-of-order writing is allowed, but delete operations are not supported for the time being, and update operations are only allowed when the update parameter is set to 1. By adopting the one-table-per-collection-point model, the data of a given time period is stored continuously, the write against a single table is a simple append operation, and multiple records can be read at one time. This ensures the best possible insert and query performance for a single data collection point.
+- Tag data: stored in the meta files in vnodes, supporting the four standard operations of add, delete, modify and query. The amount of data is not large: if there are N tables, there are N records, so all of it can be stored in memory. Since tag filtering can be very frequent, TDengine supports multi-core, multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the filtering results return in milliseconds.
+- Metadata: stored in the mnode and including system nodes, users, DBs, table schemas and other information. The four standard operations of add, delete, modify and query are supported. The amount of this data is not large and can be stored in memory; moreover, the query volume is not large because of client caching. Therefore, although TDengine uses centralized storage management for metadata, it does not become a performance bottleneck.
+
+Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
+
+- Greatly reduced redundancy of tag data storage: a general NoSQL or time-series database adopts K-V storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated values, wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite it, which is extremely expensive.
+- Extremely efficient aggregation queries across multiple tables: for an aggregation query across multiple tables, TDengine first finds the tables that pass the tag filters, then finds the corresponding data blocks of these tables, which greatly reduces the data sets to be scanned and thus greatly improves query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, so tag queries over tens of millions of tables can return in milliseconds.
+
+### Data Sharding
+
+For large-scale data management, achieving scale-out generally requires a partitioning or sharding strategy. TDengine implements data sharding via vnodes, and time-series data partitioning via one data file per time range.
+
+A vnode (virtual data node) is responsible for providing write, query and computing services for the collected time-series data. To facilitate load balancing, data recovery and support for heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
+
+For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to handle it (if a 16-byte record is generated per second, the raw data generated in one year is less than 0.5 GB), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing it across two or more dnodes. Moreover, a vnode can store the data of multiple data collection points (tables), with an upper limit of one million tables per vnode. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of CPU cores.
+
+When creating a DB, the system does not allocate resources immediately. When a table is created, the system checks whether there is an allocated vnode with free table space. If so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode chosen from the cluster according to the current workload, and then creates the table in it. If a DB has multiple replicas, the system creates not a single vnode but a vgroup (virtual data node group). The system imposes no limit on the number of vnodes; it is only limited by the computing and storage resources of the physical nodes.
+
+The metadata of each table (including schema, tags, etc.) is also stored in the vnode instead of centrally in the mnode. This effectively shards the metadata, which enables efficient, parallel tag filtering operations.
+
+### Data Partitioning
+
+In addition to vnode sharding, TDengine partitions time-series data by time range. Each data file contains time-series data for only one time range, whose length is determined by the DB configuration parameter "days". Partitioning by time range also makes it easy to implement data retention policies: as soon as a data file exceeds the specified number of days (system configuration parameter "keep"), it is automatically deleted. Moreover, different time ranges can be stored in different paths and on different storage media, which facilitates hot/cold management of big data and enables tiered storage.
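+
+For example, the database creation statement below (the same form used in the Data Modeling section of this documentation) sets a 10-day file range and a 365-day retention period:
+
+```mysql
+CREATE DATABASE power DAYS 10 KEEP 365;
+```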
+
+In general, **TDengine splits big data along two dimensions, vnode and time**, enabling efficient, parallel management and scale-out.
+
+### Load Balancing
+
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so that the mnode knows the status of the entire cluster. Based on the overall state, when the mnode finds an overloaded dnode, it migrates one or more vnodes to other dnodes. During this process, external services keep running, and data insertion, query and computation operations are not affected.
+
+If the mnode has not received a dnode's status for a period of time, the dnode is treated as offline. If the offline state lasts beyond a certain time (the duration is determined by the configuration parameter "offlineThreshold"), the dnode is forcibly removed from the cluster by the mnode. If the vnodes on this dnode have more than one replica, the system automatically creates new replicas on other dnodes to maintain the replica count. If there is an mnode on this dnode and the mnode has more than one replica, the system also automatically creates a new mnode on another dnode to maintain the replica count.
+
+When new data nodes are added to the cluster, bringing new computing and storage resources, the system automatically starts the load-balancing process.
+
+The load-balancing process requires no manual intervention and no application restart; it brings new nodes into service automatically and completely transparently. **Note: load balancing is controlled by the parameter "balance", which turns automatic load balancing on or off.**
+
+## Data Writing and Replication Process
+
+If a database has N replicas, a virtual node group has N virtual nodes, of which only one is the master and all others are slaves. When an application writes a new record to the system, only the master vnode can accept the write request. If a slave vnode receives a write request, the system notifies taosc to redirect it.
+
+### Master vnode Writing Process
+
+The master vnode follows the writing process below:
+
+ Picture 3: TDengine master writing process
+
+1. The master vnode receives the data insertion request from the application, verifies it, and moves to the next step;
+2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes the WAL data to disk immediately, so that even if the system goes down, all data can be recovered from the log file;
+3. If there are multiple replicas, the vnode forwards the data packet to the slave vnodes in the same virtual node group, with a version number attached to the data;
+4. The vnode writes the data into memory and adds the record to the "skip list";
+5. The master vnode returns a confirmation message to the application, indicating a successful write;
+6. If any of Steps 2, 3 or 4 fails, the error is returned directly to the application.
+
+### Slave vnode Writing Process
+
+For a slave vnode, the writing process is as follows:
+
+
+ Picture 4: TDengine slave writing process
+
+1. The slave vnode receives a data insertion request forwarded by the master vnode;
+2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes the WAL data to disk immediately, so that even if the system goes down, all data can be recovered from the log file;
+3. The vnode writes the data into memory and adds the record to the "skip list".
+
+Compared with the master vnode, the slave vnode has no forwarding step and no reply-confirmation step, that is, two steps fewer. The memory write and the WAL write are exactly the same.
+
+### Remote Disaster Recovery and IDC Migration
+
+As the master and slave processes above show, TDengine adopts asynchronous replication for data synchronization. This greatly improves write performance, with no obvious impact from network delay. By configuring an IDC and rack number for each physical node, it can be ensured that, within a virtual node group, the virtual nodes are hosted on physical nodes in different IDCs and on different racks, thus implementing remote disaster recovery without any other tools.
+
+On the other hand, TDengine supports dynamic modification of the replica count. Once the number of replicas increases, the newly added virtual nodes immediately enter the data synchronization process, and after synchronization completes they can provide services. During synchronization, the master and the other already synchronized virtual nodes keep serving. With this feature, TDengine can carry out an IDC migration without service interruption: it is only necessary to add the new physical nodes to the existing cluster, and then remove the old physical nodes after data synchronization is completed.
+
+However, this asynchronous replication method leaves a tiny time window in which written data can be lost. The specific scenario is as follows:
+
+1. The master vnode completes its 5-step operation, confirms the successful write to the application, and then goes down;
+2. The slave vnode receives the write request, but fails before writing the log in Step 2;
+3. The slave vnode becomes the new master, so one record is lost.
+
+In theory, as long as replication is asynchronous, data loss cannot be completely ruled out. However, this window is extremely small: it only opens when the master and slave fail at nearly the same time, just after a successful write has been confirmed to the application.
+
+Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
+
+### Master/slave Selection
+
+Each vnode maintains a version number. When in-memory data is persisted, the version number is persisted too. For each data update operation, whether on time-series data or metadata, the version number is increased by one.
+
+When a vnode starts, its role (master or slave) is undetermined, and its data is unsynchronized. It must establish TCP connections with the other vnodes in its virtual node group and exchange status, including the version and its own role. Through this exchange, the system carries out a master-selection process, with the following rules:
+
+1. If there is only one replica, it is always the master;
+2. When all replicas are online, the one with the latest version becomes the master;
+3. If more than half of the virtual nodes in the group are online and one of them is a slave, it automatically becomes the master;
+4. For rules 2 and 3, if multiple virtual nodes qualify, the first vnode in the virtual node group list is selected as the master.
+
+See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/documentation/architecture/replica/) for more information on the data replication process.
+
+### Synchronous Replication
+
+For scenarios with higher data consistency requirements, asynchronous replication is not sufficient, because there is a small probability of data loss. Therefore, TDengine also provides a synchronous replication mechanism. When creating a database, in addition to the number of replicas, the user specifies a new parameter, "quorum". If quorum is greater than one, every time the master forwards a message to the replicas, it must wait for quorum-1 confirmation replies before informing the application that the write succeeded. If quorum-1 confirmation replies are not received within a certain period of time, the master vnode returns an error to the application.
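+
+As a sketch, a database with three replicas that requires one confirmed slave per write could be created like this (the database name and values are illustrative):
+
+```mysql
+CREATE DATABASE demo REPLICA 3 QUORUM 2;
+```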
+
+With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization between mnodes uses synchronous replication by default.
+
+## Caching and Persistence
+
+### Caching
+
+TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. This strategy differs from the read-driven caching mode (Least-Recently-Used, LRU): TDengine puts the most recently written data directly into the system buffer, and when the buffer reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data, users care most about the newly generated data, that is, the current state. TDengine takes full advantage of this by keeping the most recently arrived (current-state) data in the buffer.
+
+TDengine provides millisecond-level data access to users through its query functions. Keeping recently arrived data in the buffer lets queries for the latest record or batch of records respond more quickly, and improves the overall query responsiveness of the database. In this sense, **by setting appropriate configuration parameters, TDengine can be used as a data cache without deploying Redis or another cache system**, which effectively simplifies the system architecture and reduces operational costs. Note, however, that after TDengine is restarted the system buffer is empty: the previously cached data has been flushed to disk in batches, and it is not reloaded into the buffer as it would be in a dedicated key-value cache system.
+
+Each vnode has its own independent memory, composed of multiple fixed-size memory blocks; memory is completely isolated between vnodes. When data is written, it is appended sequentially to memory, similar to a log, but each vnode maintains its own skip list for fast lookups. When more than one third of the memory blocks are used, flushing to disk starts, and subsequent writes go to a new memory block. This way, one third of the memory blocks of a vnode always hold the latest data, achieving both caching and fast search. The number of memory blocks of a vnode is determined by the configuration parameter "blocks", and the size of each memory block by the parameter "cache".
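+
+Both parameters can be set per database; as an illustrative sketch (the values are placeholders, not recommendations):
+
+```mysql
+-- 6 memory blocks of 16 MB each per vnode in this database
+CREATE DATABASE demo CACHE 16 BLOCKS 6;
+```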
+
+### Persistent Storage
+
+TDengine writes data from the buffer to hard disk for persistent storage in a data-driven manner: when the cached data in a vnode reaches a certain volume, TDengine starts a disk-writing thread to flush the cached data to persistent storage, so as not to block subsequent writes. TDengine opens a new database log file when flushing begins, and deletes the old log file after the flush succeeds, to avoid unlimited log growth.
+
+To make full use of the characteristics of time-series data, TDengine splits the data persisted by a vnode into multiple files, each saving data for a fixed number of days determined by the system configuration parameter "days". With this arrangement, given the start and end dates of a query, the data files to open can be located immediately without any index, which greatly speeds up reads.
+
+For collected data, there is generally a retention period, determined by the system configuration parameter "keep". Data files older than this number of days are automatically deleted by the system to free up storage space.
+
+Given the "days" and "keep" parameters, the total number of data files in a vnode is keep/days. The total number of data files should be neither too large nor too small; 10 to 100 is appropriate. Based on this principle, a reasonable "days" can be chosen. In the current version, the parameter "keep" can be modified, but the parameter "days" cannot be changed once set.
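+
+For example, with keep set to 365 and days set to 10, a vnode holds about 365 / 10 ≈ 37 data files, comfortably within the recommended range.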
+
+In each data file, the data of a table is stored in blocks. A table can have one or more data blocks in a file. Within a file block, data is stored in columns, occupying continuous storage space, which greatly improves read speed. The size of a file block is determined by the system parameter "maxRows" (the maximum number of records per block), with a default of 4096. This value should be neither too large nor too small: too large, and locating data for a query takes longer; too small, and the data block index becomes too large, compression efficiency drops, and reads slow down.
+
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file holds summary information about each table's data blocks, recording the offset of each data block in the data file, the start and end times of the data, and other information, allowing the system to locate the data quickly. Each data file also has a corresponding last file (with a .last postfix), designed to prevent data block fragmentation when flushing to disk. If the number of records flushed from a table does not reach the system configuration parameter "minRows" (minimum number of records per block), they are stored in the last file first. At the next flush, the newly written records are merged with the records in the last file and then written into the data file.
+
+When data is written to disk, the system decides whether to compress it according to the system configuration parameter "comp". TDengine provides three options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of the data, using algorithms such as delta-of-delta coding, simple 8B, zig-zag coding and LZ4. Two-stage compression applies a general-purpose compression algorithm on top of one-stage compression, achieving a higher compression ratio.
+
+### Tiered Storage
+
+By default, TDengine saves all data in the /var/lib/taos directory, with the data files of each vnode in a different subdirectory. To expand the storage space, minimize file-read bottlenecks and improve data throughput, TDengine allows multiple mounted hard disks to be used by the system at the same time via the system parameter "dataDir". In addition, TDengine provides tiered data storage, that is, storage on different media according to the timestamps of the data files. For example, the latest data can be stored on SSD, data older than a week on a local hard disk, and data older than four weeks on a network storage device, thereby reducing storage costs while keeping data access efficient. The movement of data between storage media is done automatically by the system and is completely transparent to applications. Tiered storage is also configured through the system parameter "dataDir".
+
+
+
+dataDir format is as follows:
+```
+dataDir data_path [tier_level]
+```
+
+Here data_path is the folder path of the mount point and tier_level is the storage tier of the media. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all disks within that tier. TDengine supports up to 3 storage tiers, so tier_level can be 0, 1, or 2. When configuring dataDir, exactly one mount path must be given without a tier_level; it is called the special mount disk (path). This mount path defaults to level-0 storage media and contains special file links, which must not be removed, otherwise the written data will be damaged beyond recovery.
+
+
+
+Suppose a physical node has six mountable hard disks, /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 are to be designated as level-0 storage media, disk3 and disk4 as level 1, and disk5 and disk6 as level 2. Disk1 is the special mount disk. You can then configure /etc/taos/taos.cfg as follows:
+
+```
+dataDir /mnt/disk1/taos
+dataDir /mnt/disk2/taos 0
+dataDir /mnt/disk3/taos 1
+dataDir /mnt/disk4/taos 1
+dataDir /mnt/disk5/taos 2
+dataDir /mnt/disk6/taos 2
+```
+
+A mounted disk can also be a non-local network disk, as long as the system can access it.
+
+
+Note: Tiered Storage is only supported in Enterprise Edition
+
+## Data Query
+
+TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode.
+
+### Single Table Query
+
+The parsing and validation of SQL statements are completed on the client side. An SQL statement is parsed into an Abstract Syntax Tree (AST), which is then validated. The client then requests the metadata (table metadata) of the table referenced in the query from the management node (mnode).
+
+According to the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the targeted virtual node (vnode) and forwards the message to the vnode's query execution queue. The query execution thread of the vnode establishes the basic query execution environment, immediately acknowledges the query request, and starts executing the query at the same time.
+
+When the client fetches the query result, the worker thread in the dnode's query execution queue waits for the vnode's execution thread to finish before returning the result to the requesting client.
+
+### Aggregation by Time Axis, Downsampling, Interpolation
+
+The remarkable feature that distinguishes time-series data from ordinary data is that every record has a timestamp, so aggregating data along the time axis is an important function that sets time-series databases apart from common databases. From this point of view, it is similar to the window queries of stream computing engines.
+
+TDengine introduces the keyword "interval" to split the time axis into fixed-length time windows and aggregate the data that falls into each window as needed. For example:
+
+```mysql
+select count(*) from d1001 interval(1h);
+```
+
+This counts the records collected by device d1001, returning the number of records in each 1-hour window.
+
+
+
+In application scenarios where query results need to be obtained continuously, if a given time window has no data, the result for that window would be missing. TDengine provides a strategy to interpolate the results of time-axis aggregation, using the keyword fill. For example:
+
+```mysql
+select count(*) from d1001 interval(1h) fill(prev);
+```
+
+This counts the records collected by device d1001 per hour; if a certain hour has no data, the value of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL filling (NULL), and specific-value filling (value).
+
+### Multi-table Aggregation Query
+
+TDengine creates a separate table for each data collection point, but in practical applications it is often necessary to aggregate data from different collection points. To perform such aggregations efficiently, TDengine introduces the concept of STable. A STable represents a specific type of data collection point: it is a set of tables whose schemas are completely consistent but which each carry their own static tags. There can be multiple tags, and they can be added, deleted and modified at any time. Applications can aggregate or run statistics over all or a subset of the tables under a STable by specifying tag filters, which greatly simplifies application development. The process is shown in the following figure:
+
+
+ Picture 5: Diagram of a multi-table aggregation query
+
+1. The application sends a query with filter conditions to the system;
+2. taosc sends the STable name to the meta node (management node);
+3. The management node sends back to taosc the list of vnodes that hold tables of the STable;
+4. taosc sends the computation request, together with the tag filters, to the data nodes hosting these vnodes;
+5. Each vnode first finds, in memory, the set of tables within its own node that satisfy the tag filters, then scans the stored time-series data, performs the corresponding aggregation, and returns the result to taosc;
+6. taosc finally aggregates the results returned by the data nodes and sends the final result back to the application.
+
+Since TDengine stores tag data and time-series data separately in the vnode, the set of tables that must participate in the aggregation is found first by filtering the tag data in memory, which greatly reduces the volume of data to scan and improves aggregation speed. At the same time, because the data is distributed across multiple vnodes/dnodes, the aggregation is carried out concurrently in multiple vnodes, which further improves speed. Aggregation functions and most operations on ordinary tables also apply to STables, with exactly the same syntax; please see TAOS SQL for details.
+
+### Precomputation
+
+To effectively improve query performance, and based on the fact that IoT data is rarely changed, TDengine records statistical information about the data stored in a data block in the block's head, including the max value, min value and sum. We call this a precomputing unit. If a query involves all the data of a whole data block, the precomputed results are used directly, and the contents of the data block need not be read at all. Since the precomputed data is much smaller than the data blocks stored on disk, for queries bottlenecked by disk IO, using precomputed results greatly reduces read IO pressure and accelerates the query. The precomputation mechanism is similar to PostgreSQL's BRIN (Block Range Index).
diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ab5e0c6a56d0b5d534386752988cb1adae3b2fa
--- /dev/null
+++ b/documentation20/en/04.model/docs.md
@@ -0,0 +1,74 @@
+# Data Modeling
+
+TDengine adopts a relational data model, so we need to build "databases" and "tables". Therefore, for a specific application scenario, it is necessary to consider the design of the database, STables, and ordinary tables. This section does not discuss detailed syntax rules, only concepts.
+
+Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling.
+
+## Create a Database
+
+Different types of data collection points often have different data characteristics, including frequency of data collection, length of data retention, number of replicas, size of data blocks, whether to update data, and so on. To ensure TDengine works with great efficiency in various scenarios, TDengine suggests placing tables with different data characteristics in different databases, because each database can be configured with its own storage strategy. When creating a database, in addition to SQL-standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time precision, max and min number of records in a file block, whether it is compressed, and the number of days covered by one data file. For example:
+
+```mysql
+CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
+```
+
+The above statement creates a database named "power". The data of this database will be kept for 365 days (data older than that will be deleted automatically), one data file will be created every 10 days, the number of memory blocks is 4, and updating data is allowed. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
+
+After the database is created, use the SQL command USE to switch to it, for example:
+
+```mysql
+USE power;
+```
+
+This makes "power" the working database of the current connection. Otherwise, before operating on a specific table, you would need to specify its database with the "database_name.table_name" form.
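+
+As a minimal sketch of the fully-qualified form (assuming the table meters that is created in the next section):
+
+```mysql
+SELECT count(*) FROM power.meters;
+```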
+
+**Note:**
+
+- Any table or STable belongs to a database. Before creating a table, the database must be created first.
+- Tables in two different databases cannot be JOINed.
+
+## Create a STable
+
+An IoT system often has many types of devices, such as smart meters, transformers, buses, and switches for power grids. To facilitate aggregation among multiple tables, TDengine requires a STable to be created for each type of data collection point. Taking the smart meter in Table 1 as an example, you can use the following SQL command to create a STable:
+
+```mysql
+CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
+```
+
+**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
+
+Just like creating an ordinary table, you need to provide the table name ('meters' in the example) and the table schema, that is, the definition of the data columns. The first column must be a timestamp ('ts' in the example), and the other columns are the physical metrics collected (current, voltage, phase in the example), whose data types can be int, float, string, etc. In addition, you need to provide the schema of the tags (location, groupId in the example), whose data types can also be int, float, string, and so on. Static attributes of a collection point can often be used as tags, such as its geographic location, device model, device group ID, administrator ID, etc. The tag schema can be added to, deleted from, and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
+
+Each type of data collection point requires its own STable, so an IoT system often has multiple STables. For a power grid, we need to build STables for smart meters, transformers, buses, switches, and so on. A device may also host multiple data collection points (for example, on a wind turbine, some collection points capture parameters such as current and voltage, while others capture environmental parameters such as temperature, humidity, and wind direction); in this case, multiple STables need to be created for the corresponding types of collection points. All physical metrics contained in one and the same STable must be collected at the same time (with a consistent timestamp).
+
+A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
+
+## Create a Table
+
+TDengine builds a separate table for each data collection point. As in standard relational databases, a table has a name and a schema, but in addition it can carry one or more tags. When creating the table, you use the STable as a template and specify the concrete tag values. Taking the smart meter in Table 1 as an example, the following SQL command can be used to create the table:
+
+```mysql
+CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+```
+
+Here d1001 is the table name, meters is the STable name, the value of the tag location is "Beijing.Chaoyang", and the value of the tag groupId is 2. Although the tag values must be specified when creating the table, they can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details.
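+
+A minimal sketch of modifying a tag value afterwards (the new group ID is an arbitrary illustrative value):
+
+```mysql
+ALTER TABLE d1001 SET TAG groupId=3;
+```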
+
+**Note:** At present, TDengine does not technically prevent using a STable of one database (dbA) as the template to create a sub-table in another database (dbB). This usage will be prohibited later, and it is not recommended to create tables this way.
+
+TDengine suggests using the globally unique ID of a data collection point as the table name (such as the device serial number). If there is no unique ID, multiple IDs can be combined to form one. It is not recommended to use the unique ID as a tag value.
+
+**Automatic table creation**: In some special scenarios, the application may not know whether the table of a certain data collection point exists when writing data. In this case, the automatic table creation syntax can be used when writing data: the non-existent table is created on the fly, while no new table is created if it already exists. For example:
+
+```mysql
+INSERT INTO d1001 USING meters TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+```
+
+The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 does not exist yet, it is automatically created using the STable meters as the template, with the tag values "Beijing.Chaoyang" and 2 set at the same time.
+
+For detailed syntax of automatic table building, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section.
+
+## Multi-column Model vs Single-column Model
+
+TDengine supports the multi-column model. As long as physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), they can be placed in one STable as different columns. There is also an extreme design, the single-column model, in which a separate STable is created for each collected physical metric. For example, three STables would be created, one each for current, voltage, and phase.
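+
+As a minimal sketch of the single-column model (the STable and column names are hypothetical):
+
+```mysql
+CREATE STABLE current (ts timestamp, val float) TAGS (location binary(64), groupId int);
+CREATE STABLE voltage (ts timestamp, val int) TAGS (location binary(64), groupId int);
+CREATE STABLE phase (ts timestamp, val float) TAGS (location binary(64), groupId int);
+```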
+
+TDengine recommends using the multi-column model whenever possible because of its higher insertion and storage efficiency. However, in some scenarios the set of collected metrics changes frequently; with the multi-column model, the STable schema would then need frequent modification, which complicates the application. In such cases, the single-column model is recommended.
diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..88746ea60867b37e5956075f88c48ebd8276dfaa
--- /dev/null
+++ b/documentation20/en/05.insert/docs.md
@@ -0,0 +1,282 @@
+# Efficient Data Writing
+
+TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC, and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, from one or multiple data collection points at the same time. TDengine supports multi-threaded insertion, out-of-order data insertion, and historical data insertion.
+
+## SQL Writing
+
+Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
+
+```mysql
+INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
+```
+
+TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
+
+```mysql
+INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
+```
+
+TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
+
+```mysql
+INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
+```
+
+For the SQL INSERT grammar, please refer to [Taos SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert).
+
+**Tips:**
+
+- To improve writing efficiency, write in batches. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (configurable via the parameter maxSQLLength, up to a maximum of 1M).
+- TDengine supports parallel writing from multiple threads. To further improve writing speed, a client should open more than 20 threads to write in parallel. However, beyond a certain threshold, adding more threads does not increase the writing speed and may even decrease it, because overly frequent thread switching brings extra overhead.
+- For the same table, if the timestamp of a newly inserted record already exists and the database was not created with the UPDATE 1 option, the new record will be discarded by default; that is, the timestamp must be unique within a table. If an application generates records automatically, the generated timestamps are very likely to collide, so the number of records successfully inserted may be smaller than the number the application tried to insert. If the UPDATE 1 option was used when creating the database, inserting a new record with an existing timestamp overwrites the original record (see the sketch after these tips).
+- The timestamp of written data must be greater than the current time minus the value of the configuration parameter keep. If keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp of written data cannot be greater than the current time plus the value of the configuration parameter days. If days is configured as 2, data more than 2 days later than the current time cannot be written.
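+
+A minimal sketch of the duplicate-timestamp behavior described above, reusing table d1001 from the earlier examples:
+
+```mysql
+INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
+-- same timestamp again: discarded by default, or overwriting the old row
+-- if the database was created with the UPDATE 1 option
+INSERT INTO d1001 VALUES (1538548685000, 11.0, 221, 0.30);
+```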
+
+## Direct Writing of Prometheus
+
+As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring, including K8S performance monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma): with only simple configuration in Prometheus and no code at all, it writes the data collected by Prometheus directly into TDengine and automatically creates the databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
+
+### Compile blm_prometheus From Source
+
+Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before you start compiling, complete the following preparations:
+
+- A server running Linux OS
+- Golang version 1.10 or higher installed
+- An appropriate TDengine version. Because the TDengine client dynamic library is used, the same version of TDengine as the server side must be installed; for example, if the server version is TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
+
+The Bailongma project has a folder, blm_prometheus, holding the Prometheus writing API. The compiling process is as follows:
+
+```bash
+cd blm_prometheus
+
+go build
+```
+
+If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
+
+### Install Prometheus
+
+Download and install Prometheus following the instructions on its official website. [Download Address](https://prometheus.io/download/)
+
+### Configure Prometheus
+
+Read the Prometheus [configuration document](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following configuration to the remote_write section of the Prometheus configuration file:
+
+- url: the URL provided by the bailongma API service; refer to the blm_prometheus startup example section below
+
+After Prometheus is launched, you can check whether the data has been written successfully by querying through the taos client.
+
+### Launch blm_prometheus
+
+blm_prometheus has the following options, which can be configured when launching it:
+
+```sh
+--tdengine-name
+
+If TDengine is installed on a server with a domain name, you can also access TDengine by configuring its domain name. In a K8S environment, it can be configured as the service name under which TDengine runs.
+
+--batch-size
+
+blm_prometheus assembles the received prometheus data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time.
+
+--dbname
+
+Set a name for the database created in TDengine; blm_prometheus will automatically create a database named dbname in TDengine. The default value is prometheus.
+
+--dbuser
+
+Set the user name to access TDengine; the default value is 'root'
+
+--dbpassword
+
+Set the password to access TDengine; the default value is 'taosdata'
+
+--port
+
+The port number blm_prometheus used to serve prometheus.
+```
+
+
+
+### Example
+
+Launch an API service for blm_prometheus with the following command:
+
+```bash
+./blm_prometheus -port 8088
+```
+
+Assuming that the IP address of the server where blm_prometheus is located is "10.1.2.3", add the URL to the configuration file of Prometheus as:
+
+```yaml
+remote_write:
+  - url: "http://10.1.2.3:8088/receive"
+```
+
+
+
+### Query written data of Prometheus
+
+The format of the data generated by Prometheus is as follows:
+
+```json
+{
+ Timestamp: 1576466279341,
+ Value: 37.000000,
+ apiserver_request_latencies_bucket {
+ component="apiserver",
+ instance="192.168.99.116:8443",
+ job="kubernetes-apiservers",
+ le="125000",
+ resource="persistentvolumes",
+ scope="cluster",
+ verb="LIST",
+ version="v1"
+ }
+}
+```
+
+Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series are inside the following {}. blm_prometheus automatically creates a STable in TDengine named after the time series and converts the tags in {} into TDengine tag values, with Timestamp as the timestamp and Value as the value of the time series. Therefore, in the TDengine client, you can check whether this data was successfully written through the following instruction:
+
+```mysql
+use prometheus;
+
+select * from apiserver_request_latencies_bucket;
+```
+
+
+
+## Direct Writing of Telegraf
+
+[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open-source tool for collecting IT operation data. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma): with only simple configuration in Telegraf and no code at all, it writes the data collected by Telegraf directly into TDengine and automatically creates the databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
+
+### Compile blm_telegraf From Source Code
+
+Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before you start compiling, complete the following preparations:
+
+- A server running Linux OS
+- Golang version 1.10 or higher installed
+- An appropriate TDengine version. Because the TDengine client dynamic library is used, the same version of TDengine as the server side must be installed; for example, if the server version is TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
+
+The Bailongma project has a folder, blm_telegraf, holding the Telegraf writing API. The compiling process is as follows:
+
+```bash
+cd blm_telegraf
+
+go build
+```
+
+If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
+
+### Install Telegraf
+
+At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package for their operating system from Telegraf's website. The download address is as follows: https://portal.influxdata.com/downloads
+
+### Configure Telegraf
+
+Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
+
+In the output plugins section, add the [[outputs.http]] configuration:
+
+- url: the URL provided by the bailongma API service; please refer to the example section below
+- data_format: "json"
+- json_timestamp_units: "1ms"
+
+In the agent section:
+
+- hostname: the machine name used to distinguish different collection devices; it must be unique
+- metric_batch_size: 100, the maximum number of records per batch that Telegraf is allowed to write. Increasing this value reduces the frequency of requests sent by Telegraf.
+
+For information on how to use Telegraf to collect data, and for more about Telegraf itself, please refer to Telegraf's official [documentation](https://docs.influxdata.com/telegraf/v1.11/).
+
+### Launch blm_telegraf
+
+blm_telegraf has the following options, which can be set at launch to tune its configuration:
+
+```sh
+--host
+
+The IP address of the TDengine server; the default is null
+
+--batch-size
+
+blm_telegraf assembles the received Telegraf data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time.
+
+--dbname
+
+Set a name for the database created in TDengine; blm_telegraf will automatically create a database named dbname in TDengine. The default value is prometheus.
+
+--dbuser
+
+Set the user name to access TDengine; the default value is 'root'
+
+--dbpassword
+
+Set the password to access TDengine; the default value is 'taosdata'
+
+--port
+
+The port number blm_telegraf used to serve Telegraf.
+```
+
+
+
+### Example
+
+Launch an API service for blm_telegraf with the following command:
+
+```bash
+./blm_telegraf -host 127.0.0.1 -port 8089
+```
+
+Assuming that the IP address of the server where blm_telegraf is located is "10.1.2.3", add the URL to the configuration file of Telegraf as:
+
+```yaml
+url = "http://10.1.2.3:8089/telegraf"
+```
+
+### Query written data of Telegraf
+
+The format of the data generated by Telegraf is as follows:
+
+```json
+{
+ "fields": {
+ "usage_guest": 0,
+ "usage_guest_nice": 0,
+ "usage_idle": 89.7897897897898,
+ "usage_iowait": 0,
+ "usage_irq": 0,
+ "usage_nice": 0,
+ "usage_softirq": 0,
+ "usage_steal": 0,
+ "usage_system": 5.405405405405405,
+ "usage_user": 4.804804804804805
+ },
+
+ "name": "cpu",
+ "tags": {
+ "cpu": "cpu2",
+ "host": "bogon"
+ },
+ "timestamp": 1576464360
+}
+```
+
+Here the name field is the name of the time series collected by Telegraf, and the tags field holds its tags. blm_telegraf automatically creates a STable in TDengine named after the time series and converts the tags field into TDengine tag values, with timestamp as the timestamp and the values in fields as the values of the time series. Therefore, in the TDengine client, you can check whether this data was successfully written through the following instruction:
+
+```mysql
+use telegraf;
+
+select * from cpu;
+```
+
+## Direct Writing of EMQ Broker
+
+MQTT is a popular data transmission protocol in the IoT. TDengine can easily access data received by an MQTT Broker and write it into TDengine.
+
+[EMQ](https://github.com/emqx/emqx) is an open-source MQTT Broker. Without writing any code, MQTT data can be written directly into TDengine by simply configuring "rules" in the EMQ Dashboard. EMQ X supports storing data to TDengine by sending it to a web service, and the Enterprise Edition also provides a native TDengine driver for direct storage. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
+
+
+
+## Direct Writing of HiveMQ Broker
+
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides both a Free Personal edition and an Enterprise edition. It is mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transmission, meeting requirements for scalability, manageability, and security. You can store data to TDengine via the HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
diff --git a/documentation20/en/06.queries/docs.md b/documentation20/en/06.queries/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4f1359820a28b390e84be93e077fecb1d5ede0e
--- /dev/null
+++ b/documentation20/en/06.queries/docs.md
@@ -0,0 +1,99 @@
+# Efficient Data Querying
+
+## Main Query Features
+
+TDengine uses SQL as the query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can manually execute ad-hoc SQL queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
+
+- Single-column and multi-column data query
+- Multiple filters for tags and numeric values: >, <, =, <>, like, etc
+- Group by, Order by, Limit/Offset of aggregation results
+- Arithmetic operations (+, -, *, /) on numeric columns and aggregation results
+- Timestamp-aligned join query (implicit join) operations
+- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc
+
+For example, in TAOS shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamp, and only two records are output:
+
+```mysql
+taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
+ ts | current | voltage | phase |
+======================================================================================
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
+Query OK, 2 row(s) in set (0.001100s)
+```
+
+To meet the needs of IoT scenarios, TDengine supports several special functions, such as twa (time-weighted average), spread (difference between maximum and minimum), and last_row (last record). More IoT-oriented functions will be added. TDengine also supports continuous queries.
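+
+A minimal sketch of these functions on table d1001 from the example above (the time range is an arbitrary illustrative choice):
+
+```mysql
+select twa(current) from d1001 where ts >= '2018-10-03 14:38:05' and ts <= '2018-10-03 14:38:16';  -- time-weighted average
+select spread(voltage) from d1001;   -- difference between max and min
+select last_row(current) from d1001; -- last record
+```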
+
+For specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).
+
+## Multi-table Aggregation Query
+
+In IoT scenarios, there are often multiple data collection points of the same type. TDengine uses the concept of STable to describe a type of data collection point, and ordinary tables to describe specific data collection points. TDengine uses tags to describe the static attributes of data collection points; a given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient method to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables also apply to STables, and the syntax is exactly the same.
+
+**Example 1**: In TAOS Shell, look up the average voltage collected by all smart meters in Beijing, grouped by location:
+
+```mysql
+taos> SELECT AVG(voltage) FROM meters GROUP BY location;
+ avg(voltage) | location |
+=============================================================
+ 222.000000000 | Beijing.Haidian |
+ 219.200000000 | Beijing.Chaoyang |
+Query OK, 2 row(s) in set (0.002136s)
+```
+
+**Example 2**: In TAOS Shell, look up the number of records and the maximum current in the past 24 hours for all smart meters with groupId 2:
+
+```mysql
+taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
+ count(*) | max(current) |
+==================================
+ 5 | 13.4 |
+Query OK, 1 row(s) in set (0.002136s)
+```
+
+TDengine only allows aggregation queries among tables belonging to the same STable; aggregation queries between different STables are not supported. The Data Query section of TAOS SQL indicates for each query operation whether STables are supported.
+
+## Down Sampling Query, Interpolation
+
+In IoT scenarios, it is often necessary to aggregate collected data by time interval through down sampling. TDengine provides the simple keyword INTERVAL, which makes queries over time windows extremely easy. For example, sum the current values collected by smart meter d1001 every 10 seconds:
+
+```mysql
+taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
+ ts | sum(current) |
+======================================================
+ 2018-10-03 14:38:00.000 | 10.300000191 |
+ 2018-10-03 14:38:10.000 | 24.900000572 |
+Query OK, 2 row(s) in set (0.000883s)
+```
+
+The down sampling operation is also applicable to STables, for example, summing the current values collected every second by all smart meters in Beijing:
+
+```mysql
+taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
+ ts | sum(current) |
+======================================================
+ 2018-10-03 14:38:04.000 | 10.199999809 |
+ 2018-10-03 14:38:05.000 | 32.900000572 |
+ 2018-10-03 14:38:06.000 | 11.500000000 |
+ 2018-10-03 14:38:15.000 | 12.600000381 |
+ 2018-10-03 14:38:16.000 | 36.000000000 |
+Query OK, 5 row(s) in set (0.001538s)
+```
+
+The down sampling operation also supports a time offset, for example, summing the current values collected by all smart meters every second, but with each time window starting at an offset of 500 milliseconds:
+
+```mysql
+taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
+ ts | sum(current) |
+======================================================
+ 2018-10-03 14:38:04.500 | 11.189999809 |
+ 2018-10-03 14:38:05.500 | 31.900000572 |
+ 2018-10-03 14:38:06.500 | 11.600000000 |
+ 2018-10-03 14:38:15.500 | 12.300000381 |
+ 2018-10-03 14:38:16.500 | 35.000000000 |
+Query OK, 5 row(s) in set (0.001521s)
+```
+
+In IoT scenarios, it is difficult to synchronize the timestamps of the data collected at different points, while many analysis algorithms (such as FFT) require data strictly aligned at equal time intervals. In many systems users must write their own programs to handle this, but the down sampling operation of TDengine solves it easily. If there is no collected data in an interval, TDengine also provides an interpolation function.
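+
+A minimal sketch of interpolating empty windows while down sampling (the time range and window size are arbitrary illustrative choices):
+
+```mysql
+select avg(current) from meters where ts >= '2018-10-03 14:38:00' and ts <= '2018-10-03 14:38:20' interval(5s) fill(linear);
+```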
+
+For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
\ No newline at end of file
diff --git a/documentation20/en/07.advanced-features/docs.md b/documentation20/en/07.advanced-features/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..cebbb4a269047b956359252bbdb71fb1a4ba8ff8
--- /dev/null
+++ b/documentation20/en/07.advanced-features/docs.md
@@ -0,0 +1,360 @@
+# Advanced Features
+
+## Continuous Query
+
+Continuous Query is a query executed by TDengine periodically with a sliding window; it is a simplified, timer-driven form of stream computing. A continuous query can be applied to a table or a STable automatically and periodically, and the result set can be passed to the application directly via a callback function, or written into a new table in TDengine. The query is always executed on a specified time window (the window size is specified by the parameter interval), and this window slides forward as time flows (the sliding period is specified by the parameter sliding).
+
+The continuous query of TDengine adopts a time-driven mode and can be defined directly in TAOS SQL without extra operations. Using continuous queries, results can be generated conveniently and quickly by time window, thereby down sampling the original collected data. After the user defines a continuous query through TAOS SQL, TDengine automatically launches the query at the end of each complete time period and pushes the calculated result to the user or writes it back to TDengine.
+
+The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways:
+
+- Unlike stream computing, which feeds back calculated results in real time, a continuous query starts calculation only after the time window closes. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59.
+- If a historical record is written into a time interval that has already been calculated, the continuous query will not recalculate, nor will it push the result to the user again. In the write-back mode, the already calculated results will not be updated either.
+- When results are pushed, the server does not cache the client's calculation status, nor does it provide an exactly-once semantic guarantee. If the user's application crashes, the continuous query, once relaunched, will only recalculate from the latest complete time window after the relaunch time. In write-back mode, TDengine can ensure the validity and continuity of the written-back data.
+
+### How to use continuous query
+
+The following uses the smart meter scenario to introduce the specific use of continuous queries. Suppose we create a STable and sub-tables through the following SQL statements:
+
+```sql
+create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
+create table D1001 using meters tags ("Beijing.Chaoyang", 2);
+create table D1002 using meters tags ("Beijing.Haidian", 2);
+...
+```
+
+The average voltage of these meters can be calculated with one minute as the time window and 30 seconds as the forward increment through the following SQL statement:
+
+```sql
+select avg(voltage) from meters interval(1m) sliding(30s);
+```
+
+Every time this statement is executed, all the data is recalculated. If you need to execute it every 30 seconds and incrementally calculate only the data of the latest minute, you can improve the above statement as follows, using a different `startTime` each time and executing it periodically:
+
+```sql
+select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
+```
+
+There is nothing wrong with this approach, but TDengine provides a simpler method: just add `create table {tableName} as` before the initial query statement, for example:
+
+```sql
+create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
+```
+
+A new table named `avg_vol` will be automatically created, and then every 30 seconds, TDengine will incrementally execute the SQL statement after `as` and write the query result into this table. The user program only needs to query the data from `avg_vol`. For example:
+
+```mysql
+taos> select * from avg_vol;
+ ts | avg_voltage_ |
+===================================================
+ 2020-07-29 13:37:30.000 | 222.0000000 |
+ 2020-07-29 13:38:00.000 | 221.3500000 |
+ 2020-07-29 13:38:30.000 | 220.1700000 |
+ 2020-07-29 13:39:00.000 | 223.0800000 |
+```
+
+It should be noted that the minimum value of the query time window is 10 milliseconds, and there is no upper limit of the time window range.
+
+In addition, TDengine allows users to specify the start and end times of a continuous query. If the start time is not given, the continuous query starts from the time window where the first original record is located; if no end time is given, the continuous query runs permanently; if an end time is specified, the continuous query stops running once the system time reaches it. For example, a continuous query created with the following SQL will run for one hour and then stop automatically:
+
+```mysql
+create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
+```
+
+It should be noted that `now` in the above example refers to the time the continuous query is created, not the time it is executed; otherwise, the query could not be stopped automatically. In addition, to reduce the impact of late-arriving original data, the calculation of continuous queries in TDengine is slightly delayed. That is, after a time window has passed, TDengine does not calculate the data of this window immediately, so it usually takes a while (generally no more than 1 minute) before the calculation result can be seen.
+
+### Manage the Continuous Query
+
+Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill a continuous query through the `kill stream` command. Subsequent versions will provide finer-grained and more convenient continuous query management commands.
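+
+A minimal sketch of managing continuous queries (the stream id is an illustrative value taken from the output of `show streams`):
+
+```mysql
+show streams;
+kill stream 3:1;
+```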
+
+## Publisher/Subscriber
+
+Based on the natural time-series characteristics of the data, data insertion in TDengine is logically consistent with data publishing (pub) in a messaging system: each new record can be regarded as a message inserted into the system with a timestamp. At the same time, TDengine stores data in strict accordance with the monotonic increase of timestamps. Essentially, every table in TDengine can be regarded as a standard message queue.
+
+TDengine supports embedded lightweight message subscription and publishing services. Using the API provided by the system, users can subscribe to one or more tables in the database with ordinary query statements. The maintenance of subscription logic and operating status is handled by the client. The client regularly polls the server for new records, and the results are fed back to the client as new records arrive.
+
+The status of the subscription and publishing services of TDengine is maintained by the client, not by the TDengine server. Therefore, if the application restarts, it is up to the application to decide from which point in time to obtain the latest data.
+
+In TDengine, there are three main APIs relevant to subscription:
+
+```c
+taos_subscribe
+taos_consume
+taos_unsubscribe
+```
+
+Please refer to the [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/) for the documentation of these APIs. The following is still a smart meter scenario as an example to introduce their specific usage (please refer to the previous section "Continuous Query" for the structure of STables and sub-tables). The complete sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c).
+
+If we want to be notified and perform some processing when the current of a smart meter exceeds a certain limit (e.g. 10A), there are two methods. One is to query each sub-table separately, record the timestamp of the latest record after each query, and then query only the data after that timestamp:
+
+```sql
+select * from D1001 where ts > {last_timestamp1} and current > 10;
+select * from D1002 where ts > {last_timestamp2} and current > 10;
+...
+```
+
+This is indeed feasible, but as the number of meters increases, the number of queries also increases, and the performance of both the client and the server is affected, until finally the system can no longer bear the load.
+
+Another method is to query the STable. In this way, no matter how many meters there are, only one query is required:
+
+```sql
+select * from meters where ts > {last_timestamp} and current > 10;
+```
+
+However, how to choose `last_timestamp` becomes a new problem. On the one hand, the time at which data is generated (the data timestamp) and the time at which it is stored are generally not the same, and the deviation is sometimes very large; on the other hand, the data of different meters arrives at TDengine at different times. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp`, we may repeatedly read the data of the other meters; if we use the timestamp of the fastest meter, the data of the other meters may be missed.
+
+The subscription function of TDengine provides a thorough solution to the above problem.
+
+First, use `taos_subscribe` to create a subscription:
+
+```c
+TAOS_SUB* tsub = NULL;
+if (async) {
+ // create an asynchronous subscription; the callback function will be called every 1s
+ tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
+} else {
+ // create a synchronous subscription; 'taos_consume' needs to be called manually
+ tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
+}
+```
+
+Subscriptions in TDengine can be either synchronous or asynchronous, and the above code will decide which method to use based on the value of parameter `async` obtained from the command line. Here, synchronous means that the user program calls `taos_consume` directly to pull data, while asynchronous means that the API calls `taos_consume` in another internal thread, and then gives the pulled data to the callback function `subscribe_callback` for processing.
+
+The parameter `taos` is an established database connection. It has no special requirements in synchronous mode. In asynchronous mode, however, note that it must not be used by other threads, otherwise unpredictable errors may occur, because the callback function is called in an internal thread of the API, and some TDengine APIs are not thread-safe.
+
+The parameter `sql` is a query statement in which you can specify filters using a where clause. In our example, if you only want to subscribe to data when the current exceeds 10A, you can write:
+
+```sql
+select * from meters where current > 10;
+```
+
+Note that the start time is not specified here, so the data of all meters will be read. If you only want to subscribe starting from the data of one day ago and do not need earlier historical data, you can add a time condition:
+
+```sql
+select * from meters where ts > now - 1d and current > 10;
+```
+
+The `topic` of the subscription is actually its name. Since the subscription function is implemented in the client API, it does not need to be globally unique, but it must be unique on a single client machine.
+
+If a subscription named `topic` does not exist, the parameter `restart` is meaningless. However, if the user program exits after creating this subscription, then when it starts again and reuses this `topic`, `restart` decides whether to read data from scratch or from the previous position. In this example, if `restart` is **true** (non-zero), the user program will definitely read all the data; whereas if this subscription existed before, some data has already been read, and `restart` is **false** (zero), the user program will not read the previously read data.
+
+The last parameter of `taos_subscribe` is the polling period, in milliseconds. In synchronous mode, if the interval between two calls to `taos_consume` is less than this time, `taos_consume` blocks until the interval exceeds it. In asynchronous mode, this is the minimum interval between two calls to the callback function.
+
+The penultimate parameter of `taos_subscribe` is used by the user program to pass an additional argument to the callback function; the subscription API passes it through to the callback without any processing. This parameter is meaningless in synchronous mode.
+
+After the subscription is created, its data can be consumed. In synchronous mode, the sample code is the `else` branch below:
+
+```c
+if (async) {
+ getchar();
+} else while(1) {
+ TAOS_RES* res = taos_consume(tsub);
+ if (res == NULL) {
+ printf("failed to consume data.");
+ break;
+ } else {
+ print_result(res, blockFetch);
+ getchar();
+ }
+}
+```
+
+Here is a **while** loop: every time the user presses the Enter key, `taos_consume` is called, and its return value is the query result set, exactly the same as `taos_use_result`. In the example, the code that uses this result set is the function `print_result`:
+
+```c
+void print_result(TAOS_RES* res, int blockFetch) {
+ TAOS_ROW row = NULL;
+ int num_fields = taos_num_fields(res);
+ TAOS_FIELD* fields = taos_fetch_fields(res);
+ int nRows = 0;
+ if (blockFetch) {
+ nRows = taos_fetch_block(res, &row);
+ for (int i = 0; i < nRows; i++) {
+ char temp[256];
+ taos_print_row(temp, row + i, fields, num_fields);
+ puts(temp);
+ }
+ } else {
+ while ((row = taos_fetch_row(res))) {
+ char temp[256];
+ taos_print_row(temp, row, fields, num_fields);
+ puts(temp);
+ nRows++;
+ }
+ }
+ printf("%d rows consumed.\n", nRows);
+}
+```
+
+Here, `taos_print_row` is used to process the subscribed data; in our example, it prints out all matching records. In asynchronous mode, consuming subscribed data is simpler:
+
+```c
+void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+ print_result(res, *(int*)param);
+}
+```
+
+To end a data subscription, you need to call `taos_unsubscribe`:
+
+```c
+taos_unsubscribe(tsub, keep);
+```
+
+Its second parameter decides whether to keep the subscription progress on the client. If this parameter is **false** (zero), the subscription can only restart reading from scratch the next time `taos_subscribe` is called, regardless of its `restart` parameter. The progress information is saved in the directory {DataDir}/subscribe/; each subscription has a file with the same name as its `topic`, and deleting that file will also cause the corresponding subscription to start from scratch when created next time.
+
+After introducing the code, let's take a look at the actual running effect, assuming that:
+
+- Sample code has been downloaded locally
+- TDengine has been installed on the same machine
+- All the databases, STables and sub-tables required by the example have been created
+
+You can compile and start the sample program by executing the following commands in the directory where the sample code is located:
+
+```shell
+$ make
+$ ./subscribe -sql='select * from meters where current > 10;'
+```
+
+After the sample program starts, open another terminal window, start the TDengine shell, and insert a record with a current of 12A into **D1001**:
+
+```shell
+$ taos
+> use test;
+> insert into D1001 values(now, 12, 220, 1);
+```
+
+At this time, because the current exceeds 10A, you should see that the sample program outputs it to the screen. You can continue to insert some data to observe the output of the sample program.
+
+### Use data subscription in Java
+
+The subscription function also provides a Java development interface, as described in the [Java Connector](https://www.taosdata.com/cn/documentation/connector/). It should be noted that the Java interface does not currently provide an asynchronous subscription mode, but user programs can achieve the same effect by creating a TimerTask.
+
+The following is an example to introduce its specific use. The function it completes is basically the same as the C language example described earlier, and it is also to subscribe to all records with current exceeding 10A in the database.
+
+#### Prepare data
+
+```sql
+# Create power Database
+taos> create database power;
+# Switch to the database
+taos> use power;
+# Create a STable
+taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
+# Create tables
+taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
+taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
+# Insert test data
+taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
+taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
+# Query all records with current over 10A from STable meters
+taos> select * from meters where current > 10;
+ ts | current | voltage | phase | location | groupid |
+===========================================================================================================
+ 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
+ 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
+ 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
+ 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
+ 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
+Query OK, 5 row(s) in set (0.004896s)
+```
+
+#### Example
+
+```java
+public class SubscribeDemo {
+ private static final String topic = "topic-meter-current-bg-10";
+ private static final String sql = "select * from meters where current > 10";
+
+ public static void main(String[] args) {
+ Connection connection = null;
+ TSDBSubscribe subscribe = null;
+
+ try {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata";
+ connection = DriverManager.getConnection(jdbcUrl, properties);
+ subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); // Create a subscription
+ int count = 0;
+ while (count < 10) {
+ TimeUnit.SECONDS.sleep(1); // wait 1 second to avoid calling consume too frequently and putting pressure on the server
+ TSDBResultSet resultSet = subscribe.consume(); // consume the data
+ if (resultSet == null) {
+ continue;
+ }
+ ResultSetMetaData metaData = resultSet.getMetaData();
+ while (resultSet.next()) {
+ int columnCount = metaData.getColumnCount();
+ for (int i = 1; i <= columnCount; i++) {
+ System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t");
+ }
+ System.out.println();
+ count++;
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ try {
+ if (null != subscribe)
+ subscribe.close(true); // Close the subscription
+ if (connection != null)
+ connection.close();
+ } catch (SQLException throwables) {
+ throwables.printStackTrace();
+ }
+ }
+ }
+}
+```
+
+Run the sample program. First, it consumes all the historical data that meets the query conditions:
+
+```shell
+# java -jar subscribe.jar
+
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
+```
+
+Then, insert a record into the table via the taos client:
+
+```sql
+# taos
+taos> use power;
+taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
+```
+
+Because the current of this data is greater than 10A, the sample program will consume it:
+
+```shell
+ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
+```
+
+## Cache
+
+TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism, which differs from the read-driven cache mode (Least-Recently-Used, LRU): it directly saves the most recently written data in the system buffer, and when the buffer reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this characteristic by keeping the most recently arrived (current status) data in the buffer.
+
+TDengine provides millisecond-level data collection to users through its query functions. Keeping recently arrived data directly in the buffer allows the system to respond more quickly to queries over the latest record or batch of records, and provides faster query response overall. In this way, by setting appropriate configuration parameters, TDengine can be used as a data cache without deploying an additional caching system, which effectively simplifies the system architecture and reduces operating costs. Note that after TDengine is restarted, the system buffer is emptied, the previously cached data is written to disk in batches, and TDengine does not reload previously cached data into the buffer as some proprietary key-value cache systems do.
+
+TDengine allocates a fixed amount of memory as its buffer, which can be configured according to application requirements and hardware resources. By setting the buffer size properly, TDengine can provide extremely high-performance writing and querying. Each virtual node in TDengine is allocated its own cache pool when it is created; each virtual node manages its own cache pool, different virtual nodes do not share pools, and all tables belonging to a virtual node share the cache pool it owns.
+
+TDengine manages the memory pool by blocks, and data inside is stored in row form. A vnode's memory pool is allocated in blocks when the vnode is created, and each memory block is managed with a First-In-First-Out strategy. The size of the blocks is determined by the system configuration parameter cache, and the number of memory blocks in each vnode by the configuration parameter blocks; so for a vnode, the total cache size is cache * blocks. To be efficient, a cache block needs to be large enough for each table to store at least dozens of records.
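+
+A minimal sketch of setting these two parameters when creating a database (the database name is hypothetical; the values shown are the common defaults, used here illustratively):
+
+```mysql
+CREATE DATABASE demo CACHE 16 BLOCKS 6;  -- 16 MB per block, 6 blocks per vnode
+```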
+
+You can quickly obtain the last record of a table or a STable through the function last_row, which is very convenient to show the real-time status or collected values of each device on a large screen. For example:
+
+```mysql
+select last_row(voltage) from meters where location='Beijing.Chaoyang';
+```
+
+This SQL statement will obtain the last recorded voltage value of all smart meters located in Chaoyang District, Beijing.
+
+## Alert
+
+In TDengine application scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out the data meeting certain conditions from the data of the most recent period, and to calculate a result from these data according to a defined formula; when the result meets certain conditions and lasts for a certain period of time, the user is notified in some form.
+
+In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).
\ No newline at end of file
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..9cbd3952068d8eac23ffa9bcd7497ff158a21d86
--- /dev/null
+++ b/documentation20/en/08.connector/docs.md
@@ -0,0 +1,1046 @@
+# Connectors
+
+TDengine provides many connectors for development, including C/C++, Java, Python, RESTful, Go, Node.js, etc.
+
+
+
+At present, TDengine connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32. The comparison matrix is as follows:
+
+| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Sunway** | **X64 TimecomTech** |
+| ----------- | ------------- | ------------- | ------------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
+| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
+| **C/C++** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ |
+| **JDBC** | ● | ● | ● | ○ | ● | ● | ○ | ○ | ○ |
+| **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ |
+| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
+| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
+| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
+| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
+
+Note: ● means verified by official tests; ○ means verified by unofficial tests.
+
+Note:
+
+- To access the TDengine database through connectors (except RESTful) on a system without the TDengine server software, you must first install the corresponding version of the client installation package so that the application driver (the file is libtaos.so on Linux and taos.dll on Windows) is installed on the system; otherwise, an error will occur saying that the corresponding library file cannot be found.
+- All APIs that execute SQL statements, such as `taos_query`, `taos_query_a`, and `taos_subscribe` in the C/C++ Connector and the corresponding APIs in other languages, can only execute one SQL statement at a time. If the actual parameter contains multiple statements, the behavior is undefined.
+- Users upgrading to TDengine 2.0.8.0 must update the JDBC connection: taos-jdbcdriver must be upgraded to 2.0.12 or above.
+- No matter which programming language connector is used, for TDengine version 2.0 and above it is recommended that each thread of a database application establish its own connection, or that a connection pool be built on top of threads, to avoid interference between threads through the "USE statement" state variable of a shared connection (query and write operations on a connection are nevertheless thread-safe).
+
+## Steps of Connector Driver Installation
+
+The server should already have the TDengine server package installed. The connector driver installation steps are as follows:
+
+**Linux**
+
+**1. Download from the TAOS Data website (https://www.taosdata.com/cn/all-downloads/)**
+
+* X64 hardware environment: TDengine-client-2.x.x.x-Linux-x64.tar.gz
+* ARM64 hardware environment: TDengine-client-2.x.x.x-Linux-aarch64.tar.gz
+* ARM32 hardware environment: TDengine-client-2.x.x.x-Linux-aarch32.tar.gz
+
+**2. Unzip the package**
+
+Place the package in any directory that the current user can read/write, and then execute the following command:
+
+`tar -xzvf TDengine-client-xxxxxxxxx.tar.gz`
+
+Where xxxxxxxxx needs to be replaced with the actual version string.
+
+**3. Execute installation script**
+
+After extracting the package, you will see the following files (directories) in the extracting directory:
+
+*install_client.sh*: Installation script for application driver
+
+*taos.tar.gz*: Application driver installation package
+
+*driver*: TDengine application driver
+
+*connector*: Connectors for various programming languages (go/grafanaplugin/nodejs/python/JDBC)
+
+*examples*: Sample programs for various programming languages (C/C#/Go/JDBC/MATLAB/Python/R)
+
+Run install_client.sh to install.
+
+**4. Configure taos.cfg**
+
+Edit the taos.cfg file (default path /etc/taos/taos.cfg) and change firstEp to the End Point of the TDengine server, for example: h1.taos.com:6030.
+
+**Tip: If no TDengine service is deployed on this machine and only the application driver is installed, only firstEp needs to be configured in taos.cfg; the FQDN of this machine does not.**
+
+**Windows x64/x86**
+
+**1. Download from the TAOS Data website (https://www.taosdata.com/cn/all-downloads/)**
+
+* X64 hardware environment: TDengine-client-2.X.X.X-Windows-x64.exe
+* X86 hardware environment: TDengine-client-2.X.X.X-Windows-x86.exe
+
+**2. Execute installation, select default values as prompted to complete**
+
+**3. Installation path**
+
+Default installation path is C:\TDengine, with the following files (directories):
+
+*taos.exe*: taos shell command line program
+
+*cfg*: configuration file directory
+
+*driver*: application driver dynamic link library
+
+*examples*: sample programs bash/C/C#/Go/JDBC/Python/Node.js
+
+*include*: header file
+
+*log*: log file
+
+*unins000.exe*: uninstall program
+
+**4. Configure taos.cfg**
+
+Edit the taos.cfg file (default path C:\TDengine\cfg\taos.cfg) and change firstEp to the End Point of the TDengine server, for example: h1.taos.com:6030.
+
+**Note:**
+
+**1. If you use FQDN to connect to the server, you must confirm that the DNS of the local network environment has been configured, or add an FQDN addressing record in the hosts file. For example, edit C:\Windows\System32\drivers\etc\hosts and add the following record: 192.168.1.99 h1.taos.com**
+
+**2. Uninstall: Run unins000.exe to uninstall the TDengine application driver.**
+
+**Installation verification**
+
+After the above installation and configuration are completed and you have confirmed that the TDengine service is running normally, you can log in with the taos client.
+
+**Linux environment:**
+
+If you execute taos directly from the Linux shell, you should be able to connect to the TDengine service normally and enter the taos shell interface. For example:
+
+```mysql
+$ taos
+Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
+Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
+taos> show databases;
+name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
+=========================================================================================================================================================================================================================
+test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
+log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
+Query OK, 2 row(s) in set (0.001198s)
+taos>
+```
+
+**Windows (x64/x86) environment:**
+
+Under cmd, enter the C:\TDengine directory and execute taos.exe directly; you should be able to connect to the TDengine service normally and enter the taos shell interface. For example:
+
+```mysql
+ C:\TDengine>taos
+ Welcome to the TDengine shell from Windows, Client Version:2.0.5.0
+ Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
+ taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
+ ===================================================================================================================================================================================================================================================================
+ test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
+ log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
+ Query OK, 2 row(s) in set (0.045000s)
+ taos>
+```
+
+## C/C++ Connector
+
+**The systems supported by the C/C++ connector are as follows:**
+
+| **CPU Type**         | **x64 (64bit)** |         |         | **ARM64** | **ARM32**          |
+| -------------------- | --------------- | ------- | ------- | --------- | ------------------ |
+| **OS Type**          | Linux           | Win64   | Win32   | Linux     | Linux              |
+| **Supported or Not** | **Yes**         | **Yes** | **Yes** | **Yes**   | **In development** |
+
+The C/C++ API is similar to MySQL's C API. When an application uses it, it needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include):
+
+```C
+#include <taos.h>
+```
+
+Note:
+
+- The TDengine dynamic library needs to be linked at compile time. On Linux the library is libtaos.so, installed at /usr/local/taos/driver; on Windows it is taos.dll, installed at C:\TDengine.
+- Unless otherwise specified, when the return value of API is an integer, 0 represents success, others are error codes representing the cause of failure, and when the return value is a pointer, NULL represents failure.
+
+For more sample code using the C/C++ connector, please visit https://github.com/taosdata/TDengine/tree/develop/tests/examples/c.
+
+### Basic API
+
+The basic API is used to create database connections and provide a runtime environment for the execution of other APIs.
+
+- `void taos_init()`
+
+Initializes the running environment. If the application does not call this API actively, it will be called automatically when the application calls taos_connect, so an application generally does not need to call it manually.
+
+- `void taos_cleanup()`
+
+Cleans up the running environment. Call this API before the application exits.
+
+- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
+
+Sets client options. Currently, only the time zone setting (`TSDB_OPTION_TIMEZONE`) and encoding setting (`TSDB_OPTION_LOCALE`) are supported. The time zone and encoding default to the current operating system settings.
+
+- `char *taos_get_client_info()`
+
+Get version information of the client.
+
+- `TAOS *taos_connect(const char *host, const char *user, const char *pass, const char *db, int port)`
+
+Creates a database connection and initializes the connection context. The parameters to be provided by the user include:
+
+* host: FQDN of the TDengine server (the master management node)
+* user: User name
+* pass: Password
+* db: Database name. If the user does not provide it, the connection can still be established normally, and the user can create a new database through this connection; if a database name is provided, it means the user has already created that database and it will be used by default
+* port: Port number
+
+A NULL return value indicates failure. The application needs to save the returned pointer for subsequent API calls.
+
+- `char *taos_get_server_info(TAOS *taos)`
+
+Get version information of the server-side.
+
+- `int taos_select_db(TAOS *taos, const char *db)`
+
+Set the current default database to db.
+
+- `void taos_close(TAOS *taos)`
+
+Close the connection, where `taos` is the pointer returned by `taos_connect` function.
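+
+Putting the basic APIs together, here is a minimal connection sketch (assuming a local server with the default root/taosdata credentials; adjust host, user and password for your deployment):
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <taos.h>
+
+int main() {
+  taos_init();  // optional: taos_connect() calls it implicitly if omitted
+  // Connect with no default database selected (the db parameter is NULL)
+  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
+  if (taos == NULL) {
+    printf("failed to connect to TDengine\n");
+    exit(EXIT_FAILURE);
+  }
+  printf("client: %s, server: %s\n",
+         taos_get_client_info(), taos_get_server_info(taos));
+  taos_close(taos);
+  taos_cleanup();
+  return 0;
+}
+```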
+
+### Synchronous query API
+
+Traditional database operation APIs are all synchronous: after the application calls an API, it remains blocked until the server returns the result. TDengine supports the following synchronous APIs:
+
+- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
+
+This API is used to execute SQL statements, which can be DQL, DML or DDL statements. The `taos` parameter is a pointer obtained through `taos_connect`. You cannot judge whether execution failed by checking whether the return value is NULL; instead, use the `taos_errno` function to parse the error code in the result set.
+
+- `int taos_result_precision(TAOS_RES *res)`
+
+Returns the precision of the timestamp field in the result set: `0` for milliseconds, `1` for microseconds, and `2` for nanoseconds.
+
+- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
+
+Get the data in the query result set by rows.
+
+- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)`
+
+The data in the query result set is obtained in batch, and the return value is the number of rows of the obtained data.
+
+- `int taos_num_fields(TAOS_RES *res)` and `int taos_field_count(TAOS_RES *res)`
+
+The two APIs are equivalent, and are used to get the number of columns in the query result set.
+
+- `int* taos_fetch_lengths(TAOS_RES *res)`
+
+Get the length of each field in the result set. The return value is an array whose length is the number of columns in the result set.
+
+- `int taos_affected_rows(TAOS_RES *res)`
+
+Get the number of rows affected by the executed SQL statement.
+
+- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
+
+Get the attributes (data type, name, number of bytes) of each column of data in the query result set, which can be used in conjunction with `taos_num_fields` to parse the data of a tuple (one row) returned by `taos_fetch_row`. The structure of `TAOS_FIELD` is as follows:
+
+```c
+typedef struct taosField {
+ char name[65]; // Column name
+ uint8_t type; // Data type
+ int16_t bytes; // Number of bytes
+} TAOS_FIELD;
+```
+
+- `void taos_stop_query(TAOS_RES *res)`
+
+Stop the execution of a query.
+
+- `void taos_free_result(TAOS_RES *res)`
+
+Releases the query result set and related resources. After the query is completed, be sure to call this API to release resources; otherwise the application may leak memory. Also note that after resources are released, calling functions such as `taos_consume` to obtain query results will crash the application.
+
+- `char *taos_errstr(TAOS_RES *res)`
+
+Get the reason why the last API call failed; the return value is an error message string.
+
+- `int taos_errno(TAOS_RES *res)`
+
+Get the reason why the last API call failed; the return value is the error code.
+
+**Note:** TDengine 2.0 and above recommends that each thread of a database application establish an independent connection or establish a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for sharing in applications. Query and write operations based on TAOS structure have multithread safety, but state variables such as "USE statement" may interfere with each other among threads. In addition, C connector can dynamically establish new database-oriented connections according to requirements (this process is not visible to users), and it is recommended to call `taos_close` to close the connection only when the program finally exits.
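+
+As an illustration, the following sketch runs a query and iterates over the rows. It assumes `taos` is a valid connection and that a table such as demo.d1001 exists (hypothetical here); `taos_print_row` is assumed to be the row-formatting helper declared in taos.h:
+
+```c
+TAOS_RES *res = taos_query(taos, "select ts, current from demo.d1001 limit 10");
+if (taos_errno(res) != 0) {
+  // The result set carries the error code and message even on failure
+  printf("query failed: %s\n", taos_errstr(res));
+} else {
+  int         num_fields = taos_field_count(res);
+  TAOS_FIELD *fields     = taos_fetch_fields(res);
+  TAOS_ROW    row;
+  char        buf[1024];
+  while ((row = taos_fetch_row(res)) != NULL) {
+    taos_print_row(buf, row, fields, num_fields);  // format one row as text
+    printf("%s\n", buf);
+  }
+}
+taos_free_result(res);  // always release the result set
+```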
+
+### Asynchronous query API
+
+In addition to synchronous APIs, TDengine also provides higher-performance asynchronous APIs to handle data insertion and query operations. Under the same software and hardware environment, the asynchronous API processes data insertion 2 to 4 times faster than the synchronous API. The asynchronous API adopts a non-blocking call mode and returns immediately, before the system actually completes the given database operation. The calling thread can then handle other work, improving the performance of the whole application. Asynchronous APIs are particularly advantageous when network latency is high.
+
+Asynchronous APIs all need applications to provide corresponding callback function. The callback function parameters are set as follows: the first two parameters are consistent, and the third parameter depends on different APIs. The first parameter param is provided to the system when the application calls the asynchronous API. When used for callback, the application can retrieve the context of the specific operation, depending on the specific implementation. The second parameter is the result set of SQL operation. If it is empty, such as insert operation, it means that there is no record returned. If it is not empty, such as select operation, it means that there is record returned.
+
+Asynchronous APIs have relatively high requirements for users, who can selectively use them according to specific application scenarios. Here are three important asynchronous APIs:
+
+- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);`
+ Execute SQL statement asynchronously.
+
+ * taos: The database connection returned by calling `taos_connect`
+ * sql: The SQL statement needed to execute
+ * fp: User-defined callback function, whose third parameter `code` is used to indicate whether the operation is successful, `0` for success, and negative number for failure (call `taos_errstr` to get the reason for failure). When defining the callback function, it mainly handles the second parameter `TAOS_RES *`, which is the result set returned by the query
+  * param: the parameter for the callback
+
+- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);`
+  Get the result set of an asynchronous query in batches; it can only be used together with `taos_query_a`. Where:
+
+  * res: The result set passed to the callback of `taos_query_a`
+  * fp: Callback function. Its parameter `param` is a user-definable parameter passed to the callback function; `numOfRows` is the number of rows of data obtained (not of the entire query result set). In the callback function, the application can iterate forward through the batch of records by calling `taos_fetch_row`. After reading all records in a block, the application needs to call `taos_fetch_rows_a` again in the callback function to obtain the next batch of records for processing, until the number of returned records (`numOfRows`) is zero (all results have been returned) or negative (the query failed).
+
+The asynchronous APIs of TDengine all use non-blocking calling mode. Applications can use multithreading to open multiple tables at the same time, and can query or insert to each open table at the same time. It should be pointed out that the **application client must ensure that the operation on the same table is completely serialized**, that is, when the insertion or query operation on the same table is not completed (when no result returned), the second insertion or query operation cannot be performed.
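+
+A sketch of this callback chain is shown below (the table name is hypothetical): the query callback starts batch fetching, and the fetch callback re-arms itself until `numOfRows` reaches zero or turns negative:
+
+```c
+void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
+  if (numOfRows > 0) {
+    for (int i = 0; i < numOfRows; i++) {
+      TAOS_ROW row = taos_fetch_row(res);  // iterate over the current batch
+      (void)row;  // process the row here
+    }
+    taos_fetch_rows_a(res, fetch_cb, param);  // request the next batch
+  } else {
+    // 0: all results consumed; negative: the query failed
+    taos_free_result(res);
+  }
+}
+
+void query_cb(void *param, TAOS_RES *res, int code) {
+  if (code == 0) {
+    taos_fetch_rows_a(res, fetch_cb, param);  // start batch fetching
+  } else {
+    printf("async query failed: %s\n", taos_errstr(res));
+  }
+}
+
+// Issue the query; the call returns immediately:
+// taos_query_a(taos, "select * from demo.d1001", query_cb, NULL);
+```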
+
+
+
+
+
+### Parameter binding API
+
+In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows:
+
+- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
+
+Create a `TAOS_STMT` object for subsequent calls.
+
+- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
+
+Parse a SQL statement and bind the parsing result and parameter information to STMT. If the parameter length is greater than 0, this parameter will be used as the length of the SQL statement. If it is equal to 0, the length of the SQL statement will be automatically judged.
+
+- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
+
+For parameter binding, bind points to an array; make sure that the number and order of the elements in this array are exactly the same as the parameters in the SQL statement. TAOS_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:
+
+```c
+typedef struct TAOS_BIND {
+ int buffer_type;
+ void * buffer;
+ unsigned long buffer_length; // Not in use
+ unsigned long *length;
+ int * is_null;
+ int is_unsigned; // Not in use
+ int * error; // Not in use
+} TAOS_BIND;
+```
+
+- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
+
+Add the currently bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param` again to bind new parameters. Note that this function only supports insert/import statements; other SQL statements such as select will return an error.
+
+- `int taos_stmt_execute(TAOS_STMT *stmt)`
+
+Execute the prepared statement. At the moment, a statement can only be executed once.
+
+- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
+
+Gets the result set of the statement. The result set is used in the same way as for a non-parameterized call, and `taos_free_result` should be called afterwards to release resources.
+
+- `int taos_stmt_close(TAOS_STMT *stmt)`
+
+Finish execution and release all resources.
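+
+A sketch of one bound insert follows. The table demo.tb with columns (ts timestamp, temperature int) is hypothetical, and the TSDB_DATA_TYPE_* constants are taken from taos.h:
+
+```c
+TAOS_STMT *stmt = taos_stmt_init(taos);
+taos_stmt_prepare(stmt, "insert into demo.tb values (?, ?)", 0);  // 0: auto-detect length
+
+int64_t ts          = 1600000000000;  // millisecond timestamp
+int32_t temperature = 25;
+
+TAOS_BIND params[2];
+memset(params, 0, sizeof(params));    // requires <string.h>; fixed-size types can leave length/is_null as NULL
+params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+params[0].buffer      = &ts;
+params[1].buffer_type = TSDB_DATA_TYPE_INT;
+params[1].buffer      = &temperature;
+
+taos_stmt_bind_param(stmt, params);   // bind one row of parameters
+taos_stmt_add_batch(stmt);            // queue the bound row
+taos_stmt_execute(stmt);              // run the prepared insert
+taos_stmt_close(stmt);                // release all statement resources
+```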
+
+### Continuous query interface
+
+TDengine provides time-driven real-time stream computing APIs. At regular intervals, you can perform various real-time aggregation operations on tables (data streams) of one or more databases. The operation is simple; there are only APIs for opening and closing a stream. The details are as follows:
+
+- `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))`
+
+This API is used to create data streams where:
+
+ * taos: Database connection established
+ * sql: SQL query statement (query statement only)
+  * fp: User-defined callback function pointer. After each stream computation completes, TDengine passes the query result (TAOS_ROW), query status (TAOS_RES) and user-defined parameter (param) to the callback function. In the callback function, the user can use taos_num_fields to obtain the number of columns in the result set and taos_fetch_fields to obtain the type of each column.
+  * stime: The time when stream computing starts. If it is 0, it means starting from now; if it is not zero, it means starting from the specified time (the number of milliseconds since 1970/1/1 UTC).
+  * param: A parameter provided by the application for the callback; it is passed back to the application on each callback
+  * callback: A second callback function, called when the continuous query stops automatically.
+
+A NULL return value indicates that creation failed; a non-NULL return value indicates success.
+
+- `void taos_close_stream(TAOS_STREAM *tstr)`
+
+Closes the data stream, where the parameter is the return value of `taos_open_stream`. When the user stops stream computing, be sure to close the data stream.
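+
+For example, a sketch that opens a stream computing a per-minute average (the query and table are hypothetical; the callback receives each computed result row):
+
+```c
+void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
+  // Invoked once for every result row produced by the stream
+  int         num_fields = taos_num_fields(res);
+  TAOS_FIELD *fields     = taos_fetch_fields(res);
+  char        buf[512];
+  taos_print_row(buf, row, fields, num_fields);  // assumed helper from taos.h
+  printf("stream result: %s\n", buf);
+}
+
+// stime = 0: start computing from now; no auto-stop callback is registered
+TAOS_STREAM *stream = taos_open_stream(taos,
+    "select avg(temperature) from demo.tb interval(1m)",
+    stream_cb, 0, NULL, NULL);
+if (stream == NULL) {
+  printf("failed to open stream\n");
+}
+// ... later, when stream computing is no longer needed:
+taos_close_stream(stream);
+```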
+
+### Data subscription interface
+
+The subscription API currently supports subscribing to one or more tables and continuously obtaining the latest data written to the tables through regular polling.
+
+- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
+
+This function is for starting the subscription service, returning the subscription object in case of success, and NULL in case of failure. Its parameters are:
+
+ * taos: Database connection established
+  * restart: If the subscription already exists, whether to start over or continue the previous subscription
+  * topic: Subject (i.e. name) of the subscription; this parameter is the unique identifier of the subscription
+  * sql: The subscribed query statement. It can only be a select statement, should query only the original data, and can only query data in ascending time order
+  * fp: The callback function invoked when query results are received (the function prototype is introduced below). It is only used in asynchronous calls; in synchronous calls this parameter should be NULL
+  * param: An additional parameter passed to the callback function; the system API passes it to the callback as-is, without any processing
+  * interval: Polling period in milliseconds. In asynchronous calls, the callback function is invoked periodically according to this parameter; to avoid affecting system performance, it is not recommended to set it too small. In synchronous calls, if the interval between two calls to `taos_consume` is less than this period, the API will block until the interval exceeds it.
+
+- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
+
+In asynchronous mode, the prototype of the callback function has the following parameters:
+
+ * tsub: Subscription object
+  * res: The query result set (note that it may contain no records)
+ * param: Additional parameters supplied by the client when `taos_subscribe` is called
+ * code: Error code
+
+- `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
+
+In synchronous mode, this function is used to get the results of subscription. The user application places it in a loop. If the interval between two calls to `taos_consume` is less than the polling cycle of the subscription, the API will block until the interval exceeds this cycle. If a new record arrives in the database, the API will return the latest record, otherwise it will return an empty result set with no records. If the return value is NULL, it indicates a system error. In asynchronous mode, user program should not call this API.
+
+- `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
+
+Unsubscribe. If the parameter `keepProgress` is not 0, the API will keep the progress information of subscription, and the subsequent call to `taos_subscribe` can continue based on this progress; otherwise, the progress information will be deleted and the data can only be read again.
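+
+A synchronous-mode sketch (the topic and query are hypothetical; `fp` is NULL because results are pulled with `taos_consume`):
+
+```c
+// restart = 1: start the subscription over; interval = 1000 ms polling period
+TAOS_SUB *sub = taos_subscribe(taos, 1, "demo-topic",
+                               "select * from demo.tb", NULL, NULL, 1000);
+if (sub != NULL) {
+  for (int i = 0; i < 10; i++) {        // poll a few times for illustration
+    TAOS_RES *res = taos_consume(sub);  // blocks until the polling period elapses
+    if (res == NULL) break;             // NULL indicates a system error
+    TAOS_ROW row;
+    while ((row = taos_fetch_row(res)) != NULL) {
+      // process the newly arrived row here
+    }
+  }
+  taos_unsubscribe(sub, 0);  // 0: discard the subscription progress
+}
+```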
+
+## Python Connector
+
+See [video tutorials](https://www.taosdata.com/blog/2020/11/11/1963.html) for the use of Python connectors.
+
+### Installation preparation
+
+- For application driver installation, please refer to [steps of connector driver installation](https://www.taosdata.com/en/documentation/connector#driver)
+- python 2.7 or >= 3.4 installed
+- pip or pip3 installed
+
+### Python client installation
+
+#### Linux
+
+Users can find the connector package for python2 and python3 in the source code src/connector/python (or tar.gz/connector/python) folder. Users can install it through `pip` command:
+
+`pip install src/connector/python/linux/python2/`
+
+or
+
+`pip3 install src/connector/python/linux/python3/`
+
+#### Windows
+
+With the Windows TDengine client installed, copy the file C:\TDengine\driver\taos.dll to the C:\Windows\System32 directory and enter the Windows cmd command-line interface:
+
+```cmd
+cd C:\TDengine\connector\python
+python -m pip install .
+```
+
+- If there is no `pip` command on the machine, the user can copy the taos folder under src/connector/python to the application directory for use. For the Windows client, after installing the TDengine Windows client, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32 directory.
+
+### How to use
+
+#### Code sample
+
+- Import the TDengine client module
+
+```python
+import taos
+```
+
+- Get the connection and cursor object
+
+```python
+conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
+c1 = conn.cursor()
+```
+
+- *host* can be any IP address of the TDengine server-side, and *config* is the directory where the client configuration file is located
+- Write data
+
+```python
+import datetime
+
+# Create a database
+c1.execute('create database db')
+c1.execute('use db')
+# Create a table
+c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
+# Insert data
+start_time = datetime.datetime(2019, 11, 1)
+affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
+# Insert data in batch
+time_interval = datetime.timedelta(seconds=60)
+sqlcmd = ['insert into tb values']
+for irow in range(1,11):
+ start_time += time_interval
+ sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
+affected_rows = c1.execute(' '.join(sqlcmd))
+```
+
+- Query data
+
+```python
+c1.execute('select * from tb')
+# pull query result
+data = c1.fetchall()
+# The result is a list, with each row as an element
+numOfRows = c1.rowcount
+numOfCols = len(c1.description)
+for irow in range(numOfRows):
+ print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
+
+# Use cursor loop directly to pull query result
+c1.execute('select * from tb')
+for data in c1:
+ print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
+```
+
+- Create subscription
+
+```python
+# Create a subscription with the topic 'test' and a consumption cycle of 1000 milliseconds
+# If the first parameter is True, the subscription restarts from the beginning. If it is False and a subscription with the topic 'test' was created before, consumption continues from where that subscription left off instead of starting over
+sub = conn.subscribe(True, "test", "select * from tb;", 1000)
+```
+
+- Consume subscription data
+
+```python
+data = sub.consume()
+for d in data:
+ print(d)
+```
+
+- Unsubscription
+
+```python
+sub.close()
+```
+
+- Close connection
+
+```python
+c1.close()
+conn.close()
+```
+
+#### Using nanosecond in Python connector
+
+So far Python does not completely support the nanosecond type (see links 1 and 2 below). The Python connector therefore returns an integer for a nanosecond value, rather than a datetime type as it does for ms and us precision, and the developer needs to handle the conversion. We recommend using the pandas to_datetime() function. If Python officially supports nanoseconds in the future, TAOS Data may change the interface accordingly, which would also require application changes.
+
+1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
+2. https://www.python.org/dev/peps/pep-0564/
+
+#### Helper
+
+Users can view the usage information of the module directly through Python's help() function, or refer to the sample programs in tests/examples/python. The following are some common classes and methods:
+
+- *TDengineConnection* class
+
+Refer to help(taos.TDengineConnection) in python. This class corresponds to a connection between the client and TDengine. In a multithreaded client scenario, it is recommended that each thread apply for its own connection instance; it is not recommended that multiple threads share one connection.
+
+- *TDengineCursor* class
+
+Refer to help(taos.TDengineCursor) in python. This class corresponds to the write and query operations performed by the client. In a multithreaded client scenario, a cursor instance must remain exclusive to one thread and cannot be shared across threads, otherwise errors will occur in the returned results.
+
+- *connect* method
+
+Used to generate an instance of taos.TDengineConnection.
+
+### Python client code sample
+
+In tests/examples/python, we provide a sample Python program read_example.py to guide you in designing your own write and query programs. After installing the corresponding client, import the taos module through `import taos`. The steps are as follows:
+
+- Get a `TDengineConnection` object through `taos.connect`; a program needs only one such object, which can be shared among multiple threads.
+
+- Get a new cursor object through the `.cursor()` method of the `TDengineConnection` object; each thread must have its own exclusive cursor.
+
+- Execute SQL statements for writing or querying through the `execute()` method of the cursor object.
+
+- If a write statement is executed, `execute` returns the number of affected rows, i.e. the number of rows successfully written.
+
+- If a query statement is executed, the result set needs to be pulled through the `fetchall()` method after the execution succeeds.
+
+ You can refer to the sample code for specific methods.
+
+## RESTful Connector
+
+To support development on various types of platforms, TDengine provides an API conforming to REST design standards, i.e. the RESTful API. To minimize the learning cost, and unlike other database RESTful API designs, TDengine operates the database directly through SQL statements contained in the BODY of an HTTP POST request; only a URL is needed. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for the use of the RESTful connector.
+
+### HTTP request format
+
+```
+http://<ip>:<PORT>/rest/sql
+```
+
+Parameter description:
+
+- IP: Any host in the cluster
+- PORT: httpPort configuration item in the configuration file, defaulting to 6041
+
+For example: http://192.168.0.1:6041/rest/sql is a URL pointing to the IP address 192.168.0.1 on port 6041.
+
+The header of HTTP request needs to carry identity authentication information. TDengine supports Basic authentication and custom authentication. Subsequent versions will provide standard and secure digital signature mechanism for identity authentication.
+
+- Custom identity authentication information is as follows (to be introduced in detail later)
+
+```
+Authorization: Taosd <TOKEN>
+```
+
+- Basic identity authentication information is as follows
+
+```
+Authorization: Basic <TOKEN>
+```
+
+The BODY of the HTTP request is a complete SQL statement. The data table in the SQL statement should carry a database prefix, such as `db_name.tb_name`. If the table name does not have a database prefix, the system returns an error, because the HTTP module is just a simple forwarder and has no concept of a current DB.
+
+Use curl to initiate an HTTP Request through custom authentication. The syntax is as follows:
+
+```bash
+curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql
+```
+
+or
+
+```bash
+curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
+```
+
+Where `TOKEN` is the string of `{username}:{password}` encoded by Base64, for example, `root:taosdata` will be encoded as `cm9vdDp0YW9zZGF0YQ==`.
+
+### HTTP return format
+
+The return value is in JSON format, as follows:
+
+```json
+{
+ "status": "succ",
+ "head": ["ts","current", …],
+ "column_meta": [["ts",9,8],["current",6,4], …],
+ "data": [
+ ["2018-10-03 14:38:05.000", 10.3, …],
+ ["2018-10-03 14:38:15.000", 12.6, …]
+ ],
+ "rows": 2
+}
+```
+
+Description:
+
+- status: Informs whether the operation results are successful or failed.
+- head: The definition of the table; if no result set is returned, there is only one column, "affected_rows". (Starting from version 2.0.17, it is recommended not to rely on the head return value to judge the data column type, but to use column_meta; in future versions head may be removed from the return value.)
+- column_meta: Added to the return value starting with version 2.0.17 to indicate the data type of each column in the data. Each column is described by three values: column name, column type and type length. For example, ["current", 6, 4] means that the column name is "current", the column type is 6 (float), and the type length is 4, corresponding to a float represented by 4 bytes. If the column type is binary or nchar, the type length indicates the maximum content length that the column can store, not the actual data length in this return value. For nchar, the type length indicates the number of Unicode characters that can be stored, not bytes.
+- data: The specific returned data, rendered line by line, if no result set is returned, then only [[affected_rows]]. The order of the data columns for each row in data is exactly the same as the order of the data columns described in column_meta.
+- rows: Indicates the total number of rows of data.
+
+Column types in column_meta:
+
+* 1:BOOL
+* 2:TINYINT
+* 3:SMALLINT
+* 4:INT
+* 5:BIGINT
+* 6:FLOAT
+* 7:DOUBLE
+* 8:BINARY
+* 9:TIMESTAMP
+* 10:NCHAR
+
+### Custom authorization code
+
+The HTTP request requires the authorization code `<TOKEN>` for identification. Authorization codes are usually provided by administrators, and can be obtained simply by sending an `HTTP GET` request as follows:
+
+```bash
+curl http://<ip>:6041/rest/login/<username>/<password>
+```
+
+Where `ip` is the IP address of the TDengine database, `username` is the database user name, `password` is the database password, and the return value is in `JSON` format. The meanings of each field are as follows:
+
+- status: flag bit for request result
+- code: code of return value
+- desc: Authorization code
+
+Sample to get authorization code:
+
+```bash
+curl http://192.168.0.1:6041/rest/login/root/taosdata
+```
+
+Return value:
+
+```json
+{
+ "status": "succ",
+ "code": 0,
+ "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
+}
+```
+
+### Use case
+
+- Lookup all records of table d1001 in demo database:
+
+```bash
+curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
+```
+
+Return value:
+
+```json
+{
+ "status": "succ",
+ "head": ["ts","current","voltage","phase"],
+ "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
+ "data": [
+ ["2018-10-03 14:38:05.000",10.3,219,0.31],
+ ["2018-10-03 14:38:15.000",12.6,218,0.33]
+ ],
+ "rows": 2
+}
+```
+
+- Create a database demo:
+
+```bash
+curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
+```
+
+Return value:
+
+```json
+{
+ "status": "succ",
+ "head": ["affected_rows"],
+ "column_meta": [["affected_rows",4,4]],
+ "data": [[1]],
+ "rows": 1
+}
+```
+
+### Other cases
+
+#### Result set in Unix timestamp
+
+When the HTTP request URL is `sqlt`, the timestamps in the returned result set are expressed in Unix timestamp format, for example:
+
+```bash
+curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
+```
+
+Return value:
+
+```json
+{
+ "status": "succ",
+ "head": ["ts","current","voltage","phase"],
+ "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
+ "data": [
+ [1538548685000,10.3,219,0.31],
+ [1538548695000,12.6,218,0.33]
+ ],
+ "rows": 2
+}
+```
+
+#### Result set in UTC time string
+
+When the HTTP request URL is `sqlutc`, the timestamp of the returned result set will be represented by a UTC time string, for example:
+
+```bash
+ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
+```
+
+Return value:
+
+```json
+{
+ "status": "succ",
+ "head": ["ts","current","voltage","phase"],
+ "column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
+ "data": [
+ ["2018-10-03T14:38:05.000+0800",10.3,219,0.31],
+ ["2018-10-03T14:38:15.000+0800",12.6,218,0.33]
+ ],
+ "rows": 2
+}
+```
+
+### Important configuration options
+
+Only some configuration parameters related to the RESTful interface are listed below; please refer to the instructions in the configuration file for other system parameters. Note: After the configuration is modified, the taosd service needs to be restarted before it takes effect.
+
+- httpPort: The port number for external RESTful services, bound to 6041 by default
+- httpMaxThreads: The number of threads started, 2 by default (starting with version 2.0.17, the default value is half the number of CPU cores, rounded down)
+- restfulRowLimit: The maximum number of result rows returned (in JSON format), 10240 by default
+- httpEnableCompress: Compression is disabled by default; currently TDengine only supports the gzip compression format
+- httpDebugFlag: Logging switch; 131: errors and alarms only, 135: debugging information, 143: very detailed debugging information; 131 by default
+
+
+
+## CSharp Connector
+
+The C# connector supports: Linux 64/Windows x64/Windows x86.
+
+### Installation preparation
+
+- For application driver installation, please refer to the [steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
+- The .NET interface file TDengineDriver.cs and the reference sample TDengineTest.cs are both located in the Windows client install_directory/examples/C# directory.
+- On Windows, C# applications can use the native C interface of TDengine to perform all database operations; future versions will provide an ORM (Dapper) framework driver.
+
+### Installation verification
+
+Run install_directory/examples/C#/C#Checker/C#Checker.exe
+
+```cmd
+cd {install_directory}/examples/C#/C#Checker
+csc /optimize *.cs
+C#Checker.exe -h
+```
+
+### How to use C# connector
+
+On Windows systems, .NET applications can use the .NET interface of TDengine to perform all database operations. The steps to use it are as follows:
+
+1. Add the .NET interface file TDengineDriver.cs to the .NET project of the application.
+2. Refer to TDengineTest.cs for how to define database connection parameters and perform operations such as data insertion and query;
+
+This .NET interface requires the taos.dll file, so before executing the application, copy the taos.dll file from the Windows client install_directory/driver directory to the folder where the .NET project generates its .exe executable file. After running the .exe file, you can access the TDengine database and perform operations such as insert and query.
+
+**Note:**
+
+1. TDengine V2.0.3.0 supports both 32-bit and 64-bit Windows systems, so when the .NET project generates an .exe file, please select the corresponding "x86" or "x64" for "Platform" under "Solution"/"Project".
+2. This .NET interface has been verified in Visual Studio 2015/2017; other VS versions have yet to be verified.
+
+### Third-party Driver
+
+Maikebing.Data.Taos is an ADO.NET provider for TDengine that supports Linux and Windows. This development package is provided by the enthusiastic contributor 麦壳饼@maikebing. For more details:
+
+```
+// Download
+https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
+// How to use
+https://www.taosdata.com/blog/2020/11/02/1901.html
+```
+
+## Go Connector
+
+### Installation preparation
+
+- For application driver installation, please refer to the [steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
+
+TDengine provides the Go driver taosSql, which implements the Go language's built-in interface database/sql/driver. Users can access TDengine in an application simply by importing the package as follows; see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details.
+
+Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
+
+```Go
+import (
+ "database/sql"
+ _ "github.com/taosdata/driver-go/taosSql"
+)
+```
+
+**It is recommended to use Go version 1.13 or above and turn on module support:**
+
+```bash
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.io,direct
+```
+
+### Common APIs
+
+- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
+
+This API is used to open a DB and returns an object of type \*DB. Generally, DRIVER_NAME is set to the string `taosSql`, and dataSourceName is set to a string of the form `user:password@/tcp(host:port)/dbname`. If the user wants to access TDengine concurrently with multiple goroutines, a DB object needs to be created with `sql.Open` in each goroutine and used to access TDengine.
+
+**Note**: When this API is called successfully, no permission check is performed. Only when Query or Exec is actually executed is the connection truly created and the user/password/host/port checked for validity. In addition, because most of the implementation of the driver sinks into libtaos, on which taosSql depends, `sql.Open` itself is particularly lightweight.
+
+- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
+
+`sql.Open` built-in method to execute non-query related SQL
+
+- `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
+
+`sql.Open` built-in method used to execute query statements
+
+- `func (db *DB) Prepare(query string) (*Stmt, error)`
+
+`sql.Open` built-in method used to create a prepared statement for later queries or executions.
+
+- `func (s *Stmt) Exec(args ...interface{}) (Result, error)`
+
+`sql.Open` built-in method to execute a prepared statement with the given arguments, returning a Result that summarizes the effect of the statement.
+
+- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)`
+
+`sql.Open` built-in method to execute a prepared query statement with the given arguments, returning the query results as a \*Rows.
+
+- `func (s *Stmt) Close() error`
+
+`sql.Open` built-in method to close the statement.
+
+## Node.js Connector
+
+The Node.js connector supports the following systems:
+
+| **CPU Type**         | x64 (64bit)                  |         |         | aarch64     | aarch32     |
+| -------------------- | ---------------------------- | ------- | ------- | ----------- | ----------- |
+| **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
+| **Supported or Not** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
+
+See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for use of the Node.js connector.
+
+### Installation preparation
+
+- For application driver installation, please refer to the [steps of installing connector driver](https://www.taosdata.com/en/documentation/connector#driver).
+
+### Install Node.js connector
+
+Users can install it through [npm](https://www.npmjs.com/) or through the source code src/connector/nodejs/. The specific installation steps are as follows:
+
+First, install the node.js connector through [npm](https://www.npmjs.com/).
+
+```bash
+npm install td2.0-connector
+```
+
+We recommend using npm to install the node.js connector. If you do not have npm installed, you can copy src/connector/nodejs/ to your nodejs project directory instead.
+
+We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with the TDengine server. Before installing the node.js connector, you also need to install the following software:
+
+### Linux
+
+- python (recommended v2.7; v3.x.x is not currently supported)
+- node (connector version 2.0.6 supports Node.js v12.x and v10.x; version 2.0.5 and earlier support v10.x; other versions may have package compatibility issues)
+- make
+- [GCC](https://gcc.gnu.org/) and other C compilers
+
+### Windows
+
+#### Solution 1
+
+Use Microsoft [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) to install all necessary tools by executing `npm install --global --production windows-build-tools` in the cmd command-line interface.
+
+#### Solution 2
+
+Manually install the following tools:
+
+- Install Visual Studio related tools: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
+- Install [Python](https://www.python.org/downloads/) 2.7 (v3.x.x is not supported) and execute `npm config set python python2.7`
+- Open `cmd`, `npm config set msvs_version 2017`
+
+If the steps above cannot be performed successfully, you can refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules).
+
+If you use ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
+
+#### Sample
+
+The sample program source code is located in install_directory/examples/nodejs and includes:
+
+node-example.js: Node.js sample source code; node-example-raw.js: a Node.js sample that uses only the cursor
+
+### Installation verification
+
+After installing the TDengine client, the nodejsChecker.js program can verify whether the current environment supports access to TDengine via nodejs.
+
+Steps:
+
+1. Create a new installation verification directory, for example ~/tdengine-test, and copy the nodejsChecker.js source program from GitHub (download address: https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).
+
+2. Execute the following command:
+
+ ```bash
+ npm init -y
+ npm install td2.0-connector
+ node nodejsChecker.js host=localhost
+ ```
+
+3. After performing the above steps, the command line will show whether nodejs can connect to the TDengine instance, and simple insert and query operations will be executed.
+
+### How to use Node.js
+
+The following are some basic uses of node.js connector. Please refer to [TDengine Node.js connector](http://docs.taosdata.com/node) for details.
+
+### Create connection
+
+When using the node.js connector, you must first `require('td2.0-connector')` and then use the `taos.connect` function. The only parameter that must be provided to `taos.connect` is `host`; other parameters take the following default values if not provided. Finally, a `cursor` needs to be initialized to communicate with the TDengine server-side.
+
+```javascript
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+var cursor = conn.cursor(); // Initializing a new cursor
+```
+
+To close the connect:
+
+```javascript
+conn.close();
+```
+
+#### To execute SQL and insert data
+
+For DDL statements (such as `create database`, `create table`, `use`, and so on), you can use the `execute` method of `cursor`. The code is as follows:
+
+```js
+cursor.execute('create database if not exists test;')
+```
+
+The above code creates a database named test. For DDL statements, there is generally no return value, and the execute return value of `cursor` is 0.
+
+For Insert statements, the code is as follows:
+
+```js
+var affectRows = cursor.execute('insert into test.weather values(now, 22.3, 34);')
+```
+
+The return value of the execute method is the number of rows affected by the statement. The SQL above inserts one row of data into the weather table of the test database, so the return value affectRows is 1.
+
+TDengine does not currently support update and delete statements.
+
+#### Query
+
+You can query the database through the `cursor.query` function.
+
+```javascript
+var query = cursor.query('show databases;')
+```
+
+The results of the query can be obtained and printed through the `query.execute()` function:
+
+```javascript
+var promise = query.execute();
+promise.then(function(result) {
+ result.pretty();
+});
+```
+
+You can also use the `bind` method of `query` to format query statements; `query` automatically fills each `?` in the query statement with the provided value.
+
+```javascript
+var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
+query.execute().then(function(result) {
+ result.pretty();
+})
+```
+
+If you provide the second parameter in the `query` statement and set it to `true`, you can also get the query results immediately. As follows:
+
+```javascript
+var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
+promise.then(function(result) {
+ result.pretty();
+})
+```
+
+#### Asynchronous function
+
+Asynchronous database queries work in much the same way; just append `_a` to functions such as `cursor.execute` and `TaosQuery.execute`.
+
+```javascript
+var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
+var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
+promise1.then(function(result) {
+ result.pretty();
+})
+promise2.then(function(result) {
+ result.pretty();
+})
+```
+
+### Example
+
+[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) provides a code example that uses the NodeJS connector to create a table, insert weather data, and query the inserted data.
+
+[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) is also a code example that uses the NodeJS connector to create a table, insert weather data, and query the inserted data, but unlike the above, this example only uses cursor.
diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..e759da31677a5344a0f6578c751c4b77f86a43db
--- /dev/null
+++ b/documentation20/en/09.connections/docs.md
@@ -0,0 +1,157 @@
+# Connections with Other Tools
+
+## Grafana
+
+TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require writing any code. The contents of data tables in TDengine can be visually shown on a dashboard.
+
+### Install Grafana
+
+TDengine currently supports Grafana 5.2.4 and above. You can download and install the package from the Grafana website according to your operating system. The download address is as follows:
+
+https://grafana.com/grafana/download.
+
+### Configure Grafana
+
+TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
+
+Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
+
+```bash
+sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+```
+
+### Use Grafana
+
+#### Configure data source
+
+You can log in to the Grafana server (username/password: admin/admin) through localhost:3000, and add data sources through `Configuration -> Data Sources` on the left panel, as shown in the following figure:
+
+
+
+Click `Add data source` to enter the Add Data Source page, and enter TDengine in the query box to select Add, as shown in the following figure:
+
+
+
+Enter the data source configuration page and modify the corresponding configuration according to the default prompt:
+
+
+
+- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/)
+- User: TDengine username.
+- Password: TDengine user password.
+
+Click `Save & Test` to test. Success will be prompted as follows:
+
+
+
+#### Create Dashboard
+
+Go back to the home page to create a dashboard, and click `Add Query` to enter the panel query page:
+
+
+
+As shown in the figure above, select the TDengine data source in Query, and enter the corresponding sql in the query box below to query. Details are as follows:
+
+- INPUT SQL: Enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` , where `from`, `to` and `interval` are built-in variables of the TDengine plug-in, representing the query range and time interval obtained from the Grafana plug-in panel. In addition to built-in variables, it is also supported to use custom template variables.
+- ALIAS BY: You can set alias for the current queries.
+- GENERATE SQL: Clicking this button will automatically replace the corresponding variable and generate the final statement to execute.
+
+Following the default prompt, query the average system memory usage at the specified interval on the server where TDengine is deployed, as follows:
+
+
+
+> Please refer to Grafana [documents](https://grafana.com/docs/) for how to use Grafana to create the corresponding monitoring interface and for more about Grafana usage.
+
+#### Import Dashboard
+
+An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plug-in directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
+
+Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
+
+
+
+You can see as follows after Dashboard imported.
+
+
+
+## MATLAB
+
+MATLAB can connect directly to TDengine via the JDBC driver provided in the installation package and load data into the local workspace.
+
+### JDBC Interface Adaptation of MATLAB
+
+Several steps are required to adapt MATLAB to TDengine. Taking MATLAB 2017a on Windows 10 as an example:
+
+- Copy the file JDBCDriver-1.0.0-dist.jar from the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
+- Copy the file taos.lib from the TDengine package to ${matlab_root}\MATLAB\R2017a\lib\win64
+- Add the .jar package just copied to the MATLAB classpath by appending the line below to the end of the file ${matlab_root}\MATLAB\R2017a\toolbox\local\classpath.txt
+
+  ```
+  $matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
+  ```
+
+- Create a file called javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a, and add the taos.dll path to that file. For example, if the file taos.dll is in the directory C:\Windows\System32, add the following line to javalibrarypath.txt:
+
+  ```
+  C:\Windows\System32
+  ```
+
+### Connect to TDengine in MATLAB to get data
+
+After the above is configured successfully, open MATLAB.
+
+- Create a connection:
+
+```matlab
+conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
+```
+
+* Make a query:
+
+```matlab
+sql0 = ['select * from tb']
+data = select(conn, sql0);
+```
+
+* Insert a record:
+
+```matlab
+sql1 = ['insert into tb values (now, 1)']
+exec(conn, sql1)
+```
+
+For more detailed examples, please refer to the examples\Matlab\TDEngineDemo.m file in the package.
+
+## R
+
+R language supports connection to the TDengine database through the JDBC interface. First, you need to install the JDBC package of R language. Launch the R language environment, and then execute the following command to install the JDBC support library for R language:
+
+```R
+install.packages('RJDBC', repos='http://cran.us.r-project.org')
+```
+
+After installation, load the RJDBC package by executing the `library('RJDBC')` command.
+
+Then load the TDengine JDBC driver:
+
+```R
+drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
+```
+
+If it succeeds, no error message will be displayed. Then use the following command to try a database connection:
+
+```R
+conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
+```
+
+Please replace the IP address in the command above with the correct one. If no error message is shown, the connection has been established successfully; otherwise the connection command needs to be adjusted according to the error prompt. TDengine supports the following functions in the *RJDBC* package:
+
+- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: Write the data in data frame iris to table test in the TDengine server. The parameter overwrite must be FALSE, append must be TRUE, and the schema of the data frame iris should be the same as that of table test.
+- `dbGetQuery(conn, "select count(*) from test")`: Run a query command
+- `dbSendUpdate(conn, "use db")`: Execute any non-query SQL statement; for example, `dbSendUpdate(conn, "use db")`, or write data with `dbSendUpdate(conn, "insert into t1 values (now, 99)")`, and the like.
+- `dbReadTable(conn, "test")`: read all the data in table test
+- `dbDisconnect(conn)`: close a connection
+- `dbRemoveTable(conn, "test")`: remove table test
+
+The functions below are not supported currently:
+
+- `dbExistsTable(conn, "test")`: if table test exists
+- `dbListTables(conn)`: list all tables in the connection
\ No newline at end of file
diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..d7d908ff424270d9aa33f89eefd36e73f6ab68b2
--- /dev/null
+++ b/documentation20/en/10.cluster/docs.md
@@ -0,0 +1,235 @@
+# TDengine Cluster Management
+
+Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure highly reliable operation of TDengine and provide scale-out capability. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering; please refer to the chapter "Overall Architecture of TDengine 2.0". Before deploying a cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience a single node first.
+
+Each data node of the cluster is uniquely identified by its End Point, which is composed of FQDN (Fully Qualified Domain Name) plus port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. Generally the FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (for how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the external service port number of this data node; the default is 6030, but it can be modified through the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically pick the first one, but it can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of this node.
+
+The cluster management of TDengine is extremely simple. Except for manual intervention in adding and deleting nodes, all other tasks are completed automatically, thus minimizing the workload of operation. This chapter describes the operations of cluster management in detail.
+
+Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1961.html) for cluster building.
+
+## Preparation
+
+**Step 0:** Plan FQDN of all physical nodes in the cluster, and add the planned FQDN to /etc/hostname of each physical node respectively; modify the /etc/hosts of each physical node, and add the corresponding IP and FQDN of all cluster physical nodes. [If DNS is deployed, contact your network administrator to configure it on DNS]
+
+**Step 1:** If the physical nodes have previous test data, or were installed with version 1.x or another version of TDengine, please delete it first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)"
+
+**Note 1:** Because FQDN information will be written into a file, if FQDN has not been configured or is changed after TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`) on the premise that the data is useless or has been backed up;
+
+**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or Host file.
+
+**Step 2:** It is recommended to close the firewall of all physical nodes, or at least ensure that TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built;
+
+**Step 3:** Install TDengine on all physical nodes, and the version must be consistent, **but do not start taosd**. During installation, when prompted whether to join an existing TDengine cluster, press enter directly on the first physical node to create a new cluster, and on the subsequent physical nodes enter the FQDN:port (default port 6030) of any online physical node in the cluster;
+
+**Step 4:** Check the network settings of all data nodes and the physical nodes where the application is located:
+
+1. Execute command `hostname -f` on each physical node, and check and confirm that the hostnames of all nodes are different (the node where the application driver is located does not need to do this check).
+2. Execute `ping host` on each physical node, where host is the hostname of another physical node, and see whether the other physical nodes can be reached; if not, check the network settings, the /etc/hosts file (the default path on Windows is C:\Windows\System32\drivers\etc\hosts), or the DNS configuration. If the ping fails, the cluster cannot be built.
+3. From the physical node where the application runs, ping the data node where taosd runs. If the ping fails, the application cannot connect to taosd. Please check the DNS settings or hosts file of the physical node where the application is located;
+4. The End Point of each data node is the output hostname plus the port number, for example, [h1.taosdata.com](http://h1.taosdata.com/):6030
+
+**Step 5:** Modify the TDengine configuration file (the file /etc/taos/taos.cfg needs to be modified on all nodes). Assume that the End Point of the first data node to be started is [h1.taosdata.com](http://h1.taosdata.com/):6030, and its parameters related to cluster configuration are as follows:
+
+```
+// firstEp is the first data node connected after each data node's first launch
+firstEp h1.taosdata.com:6030
+// Must configure it as the FQDN of this data node. If this machine has only one hostname, you can comment out this configuration
+fqdn h1.taosdata.com
+// Configure the port number of this data node, the default is 6030
+serverPort 6030
+// For application scenarios, please refer to the section “Use of Arbitrator”
+arbitrator ha.taosdata.com:6042
+```
+
+The parameters that must be modified are firstEp and fqdn. firstEp must be configured to the same value on every data node, **while fqdn must be configured to the value of the data node where it is located**. Other parameters need not be modified unless you have clear reasons.
+
+**A data node (dnode) joining the cluster must have exactly the same values for the 11 cluster-related parameters in the following table, otherwise it cannot join the cluster successfully.**
+
+
+
+| **#** | **Configuration Parameter Name** | **Description** |
+| ----- | -------------------------------- | ------------------------------------------------------------ |
+| 1 | numOfMnodes | Number of management nodes in system |
+| 2 | mnodeEqualVnodeNum | The number of vnodes that one mnode is equivalent to in resource consumption |
+| 3 | offlineThreshold | The time threshold beyond which a dnode is judged to be offline |
+| 4 | statusInterval | The interval for dnode to report its status to mnode |
+| 5 | arbitrator | The end point of the arbitrator in system |
+| 6 | timezone | Time zone |
+| 7 | locale | Location information and coding format of system |
+| 8 | charset | Character set encoding |
+| 9 | balance | Whether to start load balancing |
+| 10 | maxTablesPerVnode | The maximum number of tables that can be created in each vnode |
+| 11 | maxVgroupsPerDb | The maximum number of vgroups that can be used per DB |
+
+## Launch the First Data Node
+
+Follow the instructions in "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)" to launch the first data node, such as [h1.taosdata.com](http://h1.taosdata.com/), then execute taos to start the taos shell, and run the command "show dnodes" from the shell, as follows:
+
+```
+Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
+Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time |
+=====================================================================================
+ 1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
+Query OK, 1 row(s) in set (0.006385s)
+
+taos>
+```
+
+In the above output, you can see that the End Point of the newly launched data node is [h1.taos.com](http://h1.taos.com/):6030, which is the firstEp of the new cluster.
+
+## Launch Subsequent Data Nodes
+
+To add subsequent data nodes to the existing cluster, follow these steps:
+
+1. Start taosd on each physical node according to the chapter "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)";
+
+2. On the first data node, use the CLI program taos to log in to the TDengine system and execute the command:
+
+   ```
+   CREATE DNODE "h2.taos.com:6030";
+   ```
+
+   This adds the End Point of the new data node (learned in Step 4 of the preparation) to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. Note that "[h2.taos.com](http://h2.taos.com/):6030" in the example should be replaced with the End Point of this new data node.
+
+3. Then execute the command:
+
+   ```
+   SHOW DNODES;
+   ```
+
+   to check whether the new node joined successfully. If the added data node shows as offline, check:
+
+   - whether taosd on this data node is running properly; if not, find out why before anything else;
+   - the first few lines of the data node's taosd log file taosdlog.0 (usually in the /var/log/taos directory), to see whether the FQDN and port number printed there match the End Point just added. If not, configure the correct End Point.
+
+According to the above steps, new data nodes can be continuously added to the cluster.
+
+**Tips**:
+
+- Any data node that is online and has joined the cluster can serve as the firstEp of subsequent nodes to be added.
+- firstEp is only effective when the data node joins the cluster for the first time. After joining the cluster, the data node will save the latest End Point list of mnode and no longer rely on this parameter.
+- Two data nodes that are not configured with the firstEp parameter will each run independently after startup. It is then impossible to join one of them to the other to form a cluster. **You cannot merge two independent clusters into a new cluster**.
+
+## Data Node Management
+
+The above has already introduced how to build clusters from scratch. After the cluster is formed, new data nodes can be added at any time for expansion, or data nodes can be deleted, and the current status of the cluster can be checked.
+
+### Add data nodes
+
+Execute the CLI program taos, log in to the system using the root account, and execute:
+
+```
+CREATE DNODE "fqdn:port";
+```
+
+Add the End Point of the new data node to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. The fqdn and port of a data node's external service can be configured through the configuration file taos.cfg; by default they are obtained automatically. [It is strongly recommended not to rely on automatic FQDN acquisition, which may produce an unexpected End Point for the data node]
+
+### Delete data nodes
+
+Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
+
+```
+DROP DNODE "fqdn:port";
+```
+
+Where fqdn is the FQDN of the node to be deleted, and port is the port number of its external service.
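+
+For example, a hypothetical command to remove the data node added earlier in this chapter:
+
+```
+DROP DNODE "h2.taos.com:6030";
+```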
+
+**【Note】**
+
+- Once a data node is dropped, it cannot rejoin the cluster. To rejoin, the node needs to be redeployed (with its data folder emptied). Before the drop dnode operation completes, the cluster migrates the data away from that dnode.
+- Note that dropping a dnode and stopping the taosd process are two different things. Don't confuse them: because data migration must be performed before a dnode is deleted, the dnode being deleted must remain online; its taosd process can only be stopped after the delete operation completes.
+- After a data node is dropped, the other nodes become aware of its removal, and no node in the cluster will accept requests from that dnodeID.
+- dnodeIDs are automatically assigned by the cluster and cannot be specified manually. They are incremented at generation time and never repeat.
+
+### View data nodes
+
+Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
+
+```
+SHOW DNODES;
+```
+
+All dnodes, the fqdn:port of each dnode, its status (ready, offline, etc.), the number of vnodes, and the number of unused vnodes in the cluster will be listed. You can use this command to check after adding or deleting a data node.
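+
+As a purely hypothetical illustration, the output for a healthy two-node cluster might look like this (ids, endpoints and timestamps will differ in your deployment):
+
+```
+taos> SHOW DNODES;
+   id |      end_point      | vnodes | cores | status | role |       create_time       |
+=====================================================================================
+    1 | h1.taos.com:6030    |      0 |     2 |  ready |  any | 2020-07-31 03:49:29.202 |
+    2 | h2.taos.com:6030    |      0 |     2 |  ready |  any | 2020-07-31 04:12:00.000 |
+Query OK, 2 row(s) in set (0.001610s)
+```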
+
+### View virtual node group
+
+In order to make full use of multi-core technology and provide scalability, data needs to be processed in partitions. Therefore, TDengine splits the data of a DB into multiple parts and stores them in multiple vnodes. These vnodes may be distributed across multiple data nodes (dnodes), thus realizing scale-out. A vnode belongs to only one DB, but a DB can have multiple vnodes. vnodes are allocated automatically by mnode according to the current system resources, without any manual intervention.
+
+Execute the CLI program taos, log in to the TDengine system using the root account, and execute:
+
+```
+SHOW VGROUPS;
+```
+
+## High-availability of vnode
+
+TDengine provides high availability of the system through a multi-replica mechanism, including high availability of both vnode and mnode.
+
+The number of replicas of vnodes is associated with the DB. There can be multiple DBs in a cluster, and each DB can be configured with a different number of replicas according to operational requirements. When creating a database, specify the number of replicas with the parameter replica (the default is 1). With a single replica, the reliability of the system cannot be guaranteed: as soon as the node where the data is located goes down, the service becomes unavailable. The number of nodes in the cluster must be greater than or equal to the number of replicas, otherwise the error "more dnodes are needed" will be returned when creating a table. For example, the following command creates a database demo with 3 replicas:
+
+```
+CREATE DATABASE demo replica 3;
+```
+
+The data in a DB is partitioned and split across multiple vnode groups. The number of vnodes in a vnode group equals the number of replicas of the DB, and the data of each vnode in the same vnode group is completely consistent. In order to ensure high availability, the vnodes in a vnode group must be distributed across different data nodes (in actual deployment, they need to be on different physical machines). As long as more than half of the vnodes in a vgroup are working, the vgroup can provide services normally.
+
+A data node (dnode) may contain data from multiple DBs, so when a dnode goes offline, multiple DBs may be affected. If half or more of the vnodes in a vnode group do not work, the vnode group cannot serve externally and cannot insert or read data, which will affect the reading and writing of some tables in the DB to which it belongs.
+
+Because of the introduction of vnodes, it is impossible to simply conclude that "if more than half of the data nodes in the cluster are working, the cluster should work". But for simple cases, it is easy to judge. For example, if the number of replicas is 3 and there are only 3 dnodes, the whole cluster can still work normally if one node is down, but it cannot work normally if two data nodes are down.
+
+## High-availability of mnode
+
+The TDengine cluster is managed by mnode (a module of taosd, the management node). In order to ensure high availability of mnode, multiple mnode replicas can be configured. The number of replicas is determined by the system configuration parameter numOfMnodes, and the valid range is 1-3. In order to ensure strong consistency of metadata, mnode replicas are replicated synchronously.
+
+A cluster has multiple data nodes (dnodes), but a dnode runs at most one mnode instance. With multiple dnodes, which dnodes serve as mnodes? This is decided automatically by the system based on overall resource usage. Users can execute the following command in the TDengine console through the CLI program taos:
+
+```
+SHOW MNODES;
+```
+
+This shows the mnode list, with the End Point and role (master, slave, unsynced, or offline) of the dnode where each mnode is located. When the first data node in the cluster starts, that data node must run an mnode instance, otherwise it cannot work properly, because a system must have at least one mnode. If numOfMnodes is configured to 2, then when the second dnode starts, it will also run an mnode instance.
+
+To ensure high availability of the mnode service, numOfMnodes must be set to 2 or greater. Because the metadata saved by mnode must be strongly consistent, if numOfMnodes is greater than 2, the replication parameter quorum is automatically set to 2, that is, at least two replicas must confirm a write before the client application is notified of a successful write.
+
+**Note:** A TDengine highly-available system, whether vnode or mnode, must be configured with multiple replicas.
+
+## Load Balancing
+
+There are three situations in which load balancing will be triggered, and no manual intervention is required.
+
+- When a new data node is added to the cluster, the system will automatically trigger load balancing, and the data on some nodes will be automatically migrated to the new data node without any manual intervention.
+- When a data node is removed from the cluster, the system will automatically migrate the data on the data node to other data nodes without any manual intervention.
+- If a data node is overloaded (too large an amount of data), the system will automatically perform load balancing and migrate some vnodes of that data node to other nodes.
+
+When any of the above three situations occurs, the system computes the load of each data node to decide how to migrate.
+
+**[Tip] Load balancing is controlled by parameter balance, which determines whether to start automatic load balancing.**
+
+## Offline Processing of Data Nodes
+
+If a data node goes offline, the TDengine cluster will automatically detect it. There are two cases:
+
+- If the data node is offline for more than a certain period of time (the configuration parameter offlineThreshold in taos.cfg controls the duration), the system will automatically remove the data node from the cluster, generate a system alarm and trigger the load balancing process. If the removed data node comes online again, it cannot rejoin the cluster; the system administrator needs to add it to the cluster again.
+- If the data node comes online again within the offlineThreshold duration, the system will automatically start the data recovery process. After the data is fully recovered, the node starts to work normally.
+
+**Note:** If all data nodes belonging to a virtual node group (including the mnode group) are in offline or unsynced state, a master can only be elected, and the virtual node group can only serve externally, after all data nodes in the group come online and can exchange status information. For example, suppose the whole cluster has 3 data nodes and 3 replicas. If all 3 data nodes go down and then only 2 restart, the group does not work; only when all 3 data nodes have restarted successfully can it serve externally again.
+
+## How to Use Arbitrator
+
+If the number of replicas is even, it is impossible to elect a master in a vnode group when half of the vnodes are not working. Similarly, when half of the mnodes are not working, a master mnode cannot be elected because of the "split brain" problem. To solve this problem, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a vnode or mnode working, but is only responsible for network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are working, the vnode group or mnode group can normally provide data insertion or query services. For example, with 2 replicas, if node A is offline but node B is online and can connect to the Arbitrator, then node B can work normally.
+
+In short, under the current version, TDengine recommends configuring an Arbitrator in double-replica environments to improve availability.
+
+The name of the Arbitrator executable is tarbitrator. It has almost no requirements for system resources; it only needs a network connection and can run on any Linux server. The following briefly describes the installation and configuration steps:
+
+
+
+1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install.
+2. The command line parameter -p of this application can specify the port number of its external service, and the default is 6042.
+3. Modify the configuration file of each taosd instance, and set parameter arbitrator to the End Point corresponding to the tarbitrator in taos.cfg. (If this parameter is configured, when the number of replicas is even, the system will automatically connect the configured Arbitrator. If the number of replicas is odd, even if the Arbitrator is configured, the system will not establish a connection.)
+4. The Arbitrator configured in the configuration file will appear in the return result of instruction `SHOW DNODES`; the value of the corresponding role column will be "arb".
+
diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..3817a41766d515d663661fd4382c883e0d8f179b
--- /dev/null
+++ b/documentation20/en/11.administrator/docs.md
@@ -0,0 +1,496 @@
+# TDengine Operation and Maintenance
+
+## Capacity Planning
+
+Using TDengine to build an IoT big data platform, computing and storage resources need to be planned according to business scenarios. The following discusses the memory, CPU and hard disk space required for the system to run.
+
+### Memory requirements
+
+Each DB can create a fixed number of vgroups, which by default equals the number of CPU cores and can be configured by maxVgroupsPerDb; each replica in a vgroup is a vnode; each vnode takes up a fixed amount of memory (the size is related to the database's configuration parameters blocks and cache); each table takes up memory related to the total length of its tags; in addition, the system has some fixed memory overhead. Therefore, the system memory required for each DB can be estimated by the following formula:
+
+```
+Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
+```
+
+Example: assuming a 4-core machine, with cache at the default 16 MB and blocks at the default 6, 100,000 tables, and a total tag length of 256 bytes, the total memory requirement is: 4 * (16 * 6 + 10) + 100,000 * (0.25 + 0.5) / 1000 = 499 MB.
+
+The actual running system often stores the data in different DBs according to different characteristics of the data. All these shall be considered when planning.
+
+If there is plenty of memory, the configuration of blocks can be increased so that more data is kept in memory and query speed improves.
+
+### CPU requirements
+
+CPU requirements depend on the following two aspects:
+
+- **Data insertion** A single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can carry multiple records, and in terms of computing resources consumed, inserting one record at a time costs almost the same as inserting 10 records at once. Therefore, the more records per request, the higher the insertion efficiency. With more than 200 records per insert request, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement on front-end data collection, because records need to be cached and then inserted in batches.
+- **Query requirements** TDengine provides efficient queries, but the queries in each scenario vary greatly, as does the query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to determine the requirement.
+
+Therefore, CPU can be estimated only for data insertion; the computing resources consumed by queries cannot be estimated as precisely. In actual operation, it is not recommended to let CPU utilization exceed 50%; beyond that, new nodes should be added to bring in more computing resources.
+
+### Storage requirements
+
+Compared with general databases, TDengine has an ultra-high compression ratio. In most scenarios, the compression ratio of TDengine will not be less than 5:1, and in some scenarios may exceed 10:1, depending on the actual data characteristics. The raw data size before compression can be calculated as follows:
+
+```
+Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
+```
+
+Example: 10 million smart meters, each meter collecting data every 15 minutes, with 128 bytes collected each time. The raw data amount in one year is: 10,000,000 * 128 * 24 * 60 / 15 * 365 = 44.8512 TB. TDengine will consume approximately 44.8512 / 5 = 8.97 TB of storage.
+
+Users can set the maximum retention time of data on disk through the parameter `keep`. To further reduce storage cost, TDengine also provides tiered storage: the coldest data can be stored on the cheapest media. No adjustment is needed on the application side, at the cost of lower read speed.
+
+To improve speed, multiple hard disks can be configured so that data can be written or read concurrently. Note that TDengine provides high data reliability in the form of multiple replicas, so expensive disk arrays are no longer necessary.
+
+### Number of physical or virtual machines
+
+According to the above estimation of memory, CPU and storage, we can know how many cores, how much memory and storage space the whole system needs. If the number of data replicas is not 1, the total demand needs to be multiplied by the number of replicas.
+
+Because TDengine provides great scale-out capability, it is easy to decide how many physical or virtual machines to purchase according to the total requirement and the resources of a single physical/virtual machine.
+
+**Calculate CPU, memory and storage immediately, see:** [**Resource Estimation**](https://www.taosdata.com/config/config.html)
+
+## Fault Tolerance and Disaster Recovery
+
+### Fault tolerance
+
+TDengine supports the WAL (Write Ahead Log) mechanism to realize fault tolerance of data and ensure high availability of data.
+
+When TDengine receives the application's request packet, it first writes the requested original packet into the database log file, and then deletes the corresponding WAL after the data is successfully written. This ensures that TDengine can recover data from the database log file when the service is restarted due to power failure or other reasons, thus avoiding data loss.
+
+There are two system configuration parameters involved:
+
+- walLevel: WAL level, 0: do not write wal; 1: write wal, but do not execute fsync; 2: write wal and execute fsync.
+- fsync: the cycle in which fsync is executed when walLevel is set to 2. Setting to 0 means that fsync is executed immediately whenever there is a write.
+
+To guarantee 100% data safety, you need to set walLevel to 2 and fsync to 0, at the cost of reduced write speed. However, if the number of threads writing data on the application side reaches a certain number (more than 50), the write performance is still good, only about 30% lower than with fsync set to 3000 milliseconds.
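+
+For illustration, the corresponding lines in taos.cfg for the fully-safe setting described above would look like the following sketch (restart taosd for changes to take effect):
+
+```
+// write the WAL and execute fsync on every write
+walLevel 2
+fsync 0
+```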
+
+### Disaster recovery
+
+The TDengine cluster provides high availability of the system and implements disaster recovery through a multi-replica mechanism.
+
+The TDengine cluster is managed by mnode. In order to ensure high reliability of the mnode, multiple mnode replicas can be configured. The number of replicas is determined by the system configuration parameter numOfMnodes; to support high reliability, it needs to be set greater than 1. mnode replicas replicate data synchronously to ensure strong consistency of metadata.
+
+The number of replicas of time-series data in TDengine cluster is associated with databases. There can be multiple databases in a cluster, and each database can be configured with different replicas. When creating a database, specify the number of replicas through parameter replica. In order to support high reliability, it is necessary to set the number of replicas greater than 1.
+
+The number of nodes in TDengine cluster must be greater than or equal to the number of replicas, otherwise an error will be reported in table creation.
+
+When the nodes in TDengine cluster are deployed on different physical machines and multiple replicas are set, the high reliability of the system is implemented without using other software or tools. TDengine Enterprise Edition can also deploy replicas in different server rooms, thus realizing remote disaster recovery.
+
+## Server-side Configuration
+
+The background service of the TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, and a different directory can be specified with the -c parameter on the taosd command line. For example, `taosd -c /home/user` specifies that the configuration file is located in the /home/user directory.
+
+You can also use `-C` to show the current server configuration parameters:
+
+```
+taosd -C
+```
+
+Only some important configuration parameters are listed below. For more parameters, please refer to the comments in the configuration file. Please refer to the previous chapters for a detailed introduction to each parameter; the defaults generally work and usually do not need to be changed. **Note: after the configuration is modified, the *taosd* service needs to be restarted to take effect.**
+
+- firstEp: the End Point of the first dnode that taosd actively connects to at startup; the default value is localhost:6030.
+- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node.
+- serverPort: the port number of the external service after taosd started, the default value is 6030.
+- httpPort: the port number used by the RESTful service; all HTTP requests (TCP) for queries/writes go to this port. The default value is 6041.
+- dataDir: the data file directory to which all data files will be written. Default: /var/lib/taos.
+- logDir: the log file directory to which the running log files of the client and server will be written. Default: /var/log/taos.
+- arbitrator: the end point of the arbiter in the system; the default value is null.
+- role: optional roles for the dnode. 0: any, it can serve as an mnode and can allocate vnodes; 1: mgmt, it can only be an mnode and cannot allocate vnodes; 2: dnode, it cannot be an mnode and can only allocate vnodes.
+- debugFlag: the log level switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default value: 131 or 135 (different modules have different default values).
+- numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines.
+- logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days.
+- maxSQLLength: the maximum length allowed for a single SQL statement. Default: 65380 bytes.
+- telemetryReporting: whether TDengine is allowed to collect and report basic usage information. 0 means not allowed, and 1 means allowed. Default: 1.
+- stream: whether continuous query (a stream computing function) is enabled, 0 means not allowed, 1 means allowed. Default: 1.
+- queryBufferSize: the amount of memory reserved for all concurrent queries. As a rule of thumb, multiply the maximum possible number of concurrent queries in practice by the number of tables involved, then by 170. The unit is MB (in versions before 2.0.15, the unit of this parameter is bytes).
+- ratioOfQueryCores: sets the maximum number of query threads. The minimum value of 0 means there is only one query thread; the maximum value of 2 means the maximum number of query threads is twice the number of CPU cores. The default is 1, meaning the maximum number of query threads equals the number of CPU cores. The value can be a decimal; e.g. 0.5 means up to half as many query threads as CPU cores.
+
+**Note:** for ports, TDengine will use 13 continuous TCP and UDP port numbers from serverPort, so be sure to open them in the firewall. Therefore, if it is the default configuration, a total of 13 ports from 6030 to 6042 need to be opened, and the same for both TCP and UDP.
+
+Data in different application scenarios often have different data characteristics, such as retention days, number of replicas, collection frequency, record size, number of collection points, compression, etc. In order to obtain the best efficiency in storage, TDengine provides the following storage-related system configuration parameters:
+
+- days: the time span for a data file to store data, in days, the default value is 10.
+- keep: the number of days to keep data in the database, in days, default value: 3650.
+- minRows: the minimum number of records in a file block. Default: 100.
+- maxRows: the maximum number of records in a file block. Default: 4096.
+- comp: file compression flag bit, 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2.
+- walLevel: WAL level. 1: write wal, but do not execute fsync; 2: write wal and execute fsync. Default: 1.
+- fsync: the period in which fsync is executed when walLevel is set to 2. Setting it to 0 means fsync is executed immediately on every write. The unit is milliseconds, and the default value is 3000.
+- cache: the size of the memory block in megabytes (MB), default: 16.
+- blocks: how many cache-sized memory blocks each vnode (TSDB) has. Therefore, the memory used by a vnode is roughly (cache * blocks). Default: 4.
+- replica: number of replicas; value range: 1-3. Default: 1.
+- precision: timestamp precision identification, ms for milliseconds and us for microseconds. Default: ms
+- cacheLast: whether the last_row of sub-tables is cached in memory, 0: off; 1: on. Default: 0. (This parameter is supported as of version 2.0.11)
+
+For an application scenario, data with different characteristics may coexist. The best design is to put tables with the same data characteristics in one database. An application may thus have multiple databases, each configured with different storage parameters, ensuring optimal system performance. TDengine allows the application to specify the above storage parameters at database creation; if specified, they override the corresponding system configuration parameters. For example, consider the following SQL:
+
+```
+ create database demo days 10 cache 32 blocks 8 replica 3 update 1;
+```
+
+The SQL creates a database demo, where each data file stores 10 days of data, the memory block is 32 MB, each vnode occupies 8 memory blocks, the number of replicas is 3, updates are allowed, and all other parameters follow the system configuration.
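+
+To confirm which values actually took effect for each database, you can list the databases from the shell; the output of the following command includes the storage parameters of each database:
+
+```mysql
+SHOW DATABASES;
+```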
+
+When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows:
+
+- numOfMnodes: the number of management nodes in the system. Default: 3. (Since version 2.0.20.11 and version 2.1.6.0, the default value of "numOfMnodes" has been changed to 1.)
+- balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1.
+- mnodeEqualVnodeNum: the number of vnodes that one mnode is equivalent to in resource consumption. Default: 4.
+- offlineThreshold: the threshold for a dnode to be offline, beyond which the dnode will be removed from the cluster. The unit is seconds, and the default value is 86400*10 (i.e. 10 days).
+- statusInterval: the length of time dnode reports status to mnode. The unit is seconds, and the default value is 1.
+- maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
+- maxVgroupsPerDb: the maximum number of vgroups that can be used in each database.
+- arbitrator: the end point of the arbiter in system, which is empty by default.
+- See Client Configuration for the configuration of timezone, locale and charset.
+
+For the convenience of debugging, the log configuration of each dnode can be adjusted temporarily through SQL statements; the adjustments become invalid after the system restarts:
+
+```mysql
+ALTER DNODE <dnode_id> <config>
+```
+
+- dnode_id: can be obtained from the output of the SQL command "SHOW DNODES"
+- config: the log parameter to be adjusted, taken from the following list:
+
+  - resetlog: truncate the old log file and create a new one
+  - debugFlag <131 | 135 | 143>: set debugFlag to 131, 135 or 143
+
+For example:
+
+```
+ alter dnode 1 debugFlag 135;
+```
+
+## Client Configuration
+
+The foreground interactive client application of the TDengine system is taos, and the application driver shares the same configuration file taos.cfg with taosd. When running taos, use the parameter -c to specify the configuration file directory, e.g. `taos -c /home/cfg` means using the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information `taos --help`. This section describes the parameters used by the taos client application in the configuration file taos.cfg.
+
+**Versions after 2.0.10.0 support the following command-line parameters to display the current client configuration:**
+
+```bash
+taos -C or taos --dump-config
+```
+
+Client configuration parameters:
+
+- firstEp: the End Point of the first taosd instance that taos actively connects to at startup; the default value is localhost:6030.
+- secondEp: when taos starts, if it fails to connect to firstEp, it will try to connect to secondEp.
+- locale
+ Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API
+
+ TDengine provides a special field type nchar for storing non-ASCII encoded wide characters such as Chinese, Japanese and Korean. Data written to an nchar field is uniformly encoded in UCS4-LE format and sent to the server. Note that the correctness of the encoding is guaranteed by the client. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean, the client's encoding format must be set correctly.
+
+ Characters input on the client are in the current default encoding of the operating system, mostly UTF-8 on Linux systems; some Chinese system encodings may be GB18030 or GBK, etc. The default encoding in a docker environment is POSIX. In Chinese versions of Windows, the encoding is CP936. The client needs to make sure the character set it uses is set correctly, i.e. to the encoding currently used by the operating system the client runs on, so that data in nchar fields is correctly converted into the UCS4-LE encoding format.
+
+ The naming rule of locale in Linux is <language>_<region>.<charset>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for the mainland region, and UTF-8 for the character set. The character set encoding tells the client how to correctly parse local strings. Linux and Mac OSX systems can determine the system's character encoding by setting the locale. Because the locale used by Windows is not in the POSIX standard locale format, another configuration parameter, charset, is needed to specify the character encoding under Windows. charset can also be used to specify the character encoding in Linux systems.
+
+- charset
+
+ Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API
+
+ If charset is not set in the configuration file, in Linux system, when taos starts up, it automatically reads the current locale information of the system, and parses and extracts the charset encoding format from the locale information. If the automatic reading of locale information fails, an attempt is made to read the charset configuration, and if the reading of the charset configuration also fails, the startup process is interrupted.
+
+ In Linux system, locale information contains character encoding information, so it is unnecessary to set charset separately after setting locale of Linux system correctly. For example:
+
+ ```
+ locale zh_CN.UTF-8
+ ```
+ On Windows systems, the current system encoding cannot be obtained from locale. If string encoding information cannot be read from the configuration file, taos defaults to CP936. It is equivalent to adding the following to the configuration file:
+ ```
+ charset CP936
+ ```
+ If you need to adjust the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
+
+ In Linux systems, if the user sets both locale and charset and the two are inconsistent, the value set later overrides the value set earlier.
+ ```
+ locale zh_CN.UTF-8
+ charset GBK
+ ```
+ The valid value for charset is GBK.
+
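+ Whereas with the opposite order:
+ ```
+ charset GBK
+ locale zh_CN.UTF-8
+ ```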
+ Then the valid value for charset is UTF-8.
+
+ The configuration parameters of log are exactly the same as those of server.
+
+- timezone
+
+ Default value: get the current time zone option dynamically from the system
+
+ The time zone of the system in which the client runs. To handle data writing and querying across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of Unix timestamps means that a timestamp, once generated, is the same in any time zone. Note that Unix timestamps are converted and recorded on the client side, so the client needs the correct time zone set in order to convert other time formats into correct Unix timestamps.
+
+ In Linux systems, the client automatically reads the time zone information set by the system. Users can also set the time zone in the configuration file in a number of ways. For example:
+ ```
+ timezone UTC-8
+ timezone GMT-8
+ timezone Asia/Shanghai
+ ```
+
+ All of the above are legal formats to set the time zone to the East Eight Zone (UTC+8).
+
+ The time zone setting affects the parsing of non-Unix-timestamp content (timestamp strings and the keyword now) in query and insert SQL statements. For example:
+
+ ```sql
+ SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
+ ```
+
+ In the East Eight Zone, the SQL statement is equivalent to
+ ```sql
+ SELECT count(*) FROM table_name WHERE TS<1554955268000;
+ ```
+
+ In the UTC time zone, the SQL statement is equivalent to
+ ```sql
+ SELECT count(*) FROM table_name WHERE TS<1554984068000;
+ ```
+ To avoid the uncertainty of string time formats, Unix timestamps can be used directly. In addition, timestamp strings with explicit time zones can be used in SQL statements, e.g. an RFC3339-format string 2013-04-12T15:52:01.123+08:00, or an ISO-8601-format string 2013-04-12T15:52:01.123+0800. The conversion of these two strings into Unix timestamps is not affected by the time zone in which the system is located.
+
+ When starting taos, the End Point of a taosd instance can also be specified on the command line; otherwise it is read from taos.cfg.
+
+- maxBinaryDisplayWidth
+ The upper limit of the display width of binary and nchar fields in the shell; content beyond it is hidden. Default: 30. You can modify this option dynamically in the shell with the command `set max_binary_display_width nn`, as in the example below.
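+
+ For example, to widen the display to 120 characters (the value is arbitrary):
+
+ ```mysql
+ SET MAX_BINARY_DISPLAY_WIDTH 120;
+ ```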
+
+## User Management
+
+System administrators can add and delete users in CLI, and also modify passwords. The SQL syntax in the CLI is as follows:
+
+```sql
+CREATE USER <user_name> PASS <'password'>;
+```
+
+Create a user and specify the user name and password. The password needs to be enclosed in single quotation marks; use ASCII (half-width) single quotes.
+
+```sql
+DROP USER <user_name>;
+```
+
+Delete a user; only root can do this.
+
+```sql
+ALTER USER <user_name> PASS <'password'>;
+```
+
+Modify a user's password. To prevent it from being converted to lowercase, enclose the password in single quotation marks (ASCII half-width).
+
+```sql
+ALTER USER <user_name> PRIVILEGE <write|read>;
+```
+
+Modify the user privilege to write or read; do not add single quotation marks.
+
+Note: there are three privilege levels in the system: super/write/read, but it is currently not allowed to grant super privilege to a user through the ALTER instruction.
+
+```mysql
+SHOW USERS;
+```
+
+Show all users
+
+**Note:** In SQL syntax, < > indicates the part that the user needs to input; do not type < > itself.
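+
+Putting these together, a hypothetical session creating a temporary read-only user might look like this (the user name and password are illustrative):
+
+```mysql
+CREATE USER test_user PASS 'test_pass';
+ALTER USER test_user PRIVILEGE read;
+SHOW USERS;
+DROP USER test_user;
+```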
+
+## Import Data
+
+TDengine provides a variety of convenient data import functions, including import by script file, by data file, and by the taosdump tool.
+
+**Import by script file**
+
+The TDengine shell supports the `source <filename>` command, used to run SQL statements from a file in batch. Users can write SQL commands such as database creation, table creation and data insertion in one file, one command per line. Running the source command in the shell executes the SQL statements in the file in sequence. Lines beginning with '#' are considered comments and are automatically ignored by the shell.
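+
+For example, assuming a hypothetical batch file setup.sql with the following contents (all names are illustrative):
+
+```mysql
+# create the database and a table, then insert one record
+CREATE DATABASE IF NOT EXISTS power;
+USE power;
+CREATE TABLE IF NOT EXISTS d2001 (ts TIMESTAMP, current FLOAT);
+INSERT INTO d2001 VALUES (now, 10.3);
+```
+
+Running `source setup.sql;` in the shell executes these statements in sequence.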
+
+**Import by data file**
+
+TDengine also supports importing data from a CSV file into an existing table in the shell. A CSV file must contain data for a single table only, and its data format must be consistent with the structure of the table to be imported. The syntax is as follows:
+
+```mysql
+insert into tb1 file 'path/data.csv';
+```
+
+Note: if there is descriptive information in the first line of the CSV file, please delete it manually before importing.
+
+For example, there is now a sub-table d1001 whose table structure is as follows:
+
+```mysql
+taos> DESCRIBE d1001
+ Field | Type | Length | Note |
+=================================================================================
+ ts | TIMESTAMP | 8 | |
+ current | FLOAT | 4 | |
+ voltage | INT | 4 | |
+ phase | FLOAT | 4 | |
+ location | BINARY | 64 | TAG |
+ groupid | INT | 4 | TAG |
+```
+
+And the format of the data.csv to import is as follows:
+
+```csv
+'2018-10-04 06:38:05.000',10.30000,219,0.31000
+'2018-10-05 06:38:15.000',12.60000,218,0.33000
+'2018-10-06 06:38:16.800',13.30000,221,0.32000
+'2018-10-07 06:38:05.000',13.30000,219,0.33000
+'2018-10-08 06:38:05.000',14.30000,219,0.34000
+'2018-10-09 06:38:05.000',15.30000,219,0.35000
+'2018-10-10 06:38:05.000',16.30000,219,0.31000
+'2018-10-11 06:38:05.000',17.30000,219,0.32000
+'2018-10-12 06:38:05.000',18.30000,219,0.31000
+```
+
+Then we can use the following command to import:
+
+```mysql
+taos> insert into d1001 file '~/data.csv';
+Query OK, 9 row(s) affected (0.004763s)
+```
+
+**Import via taosdump tool**
+
+TDengine provides a convenient database import and export tool, taosdump. Users can import data exported by taosdump from one system into other systems. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html).
+
+## Export Data
+
+To facilitate data export, TDengine provides two export methods, namely, export by table and export by taosdump.
+
+**Export CSV file by table**
+
+If users need to export data from a table or a STable, they can run the following in the shell:
+
+```mysql
+select * from <tb_name> >> data.csv;
+```
+
+In this way, the data in table tb_name will be exported to the file data.csv in CSV format.
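+
+For example, to export the sub-table d1001 used earlier in this document:
+
+```mysql
+select * from d1001 >> data.csv;
+```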
+
+**Export data by taosdump**
+
+TDengine provides a convenient database export tool, taosdump. Users can choose to export all databases, a database or a table in a database, all data or data for a time period, or even just the definition of a table as needed. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html)
+
+## System Connection and Task Query Management
+
+The system administrator can query the connection, ongoing query and stream computing of the system from CLI, and can close the connection and stop the ongoing query and stream computing. The SQL syntax in the CLI is as follows:
+
+```mysql
+SHOW CONNECTIONS;
+```
+
+Shows the connections to the database; one of the columns displays ip:port, the IP address and port number of the connection.
+
+```mysql
+KILL CONNECTION <connection-id>;
+```
+
+Force a database connection to close, where connection-id is the number in the first column of the SHOW CONNECTIONS output.
+
+```mysql
+SHOW QUERIES;
+```
+
+Shows the ongoing queries. The two numbers in the first column, separated by a colon, form the query-id: the connection-id of the connection that initiated the query, and the query's sequence number on that connection.
+
+```mysql
+KILL QUERY <query-id>;
+```
+
+Force a data query to close, where query-id is the connection-id:query-no string displayed by SHOW QUERIES, such as "105:2"; just copy and paste it.
+
+```mysql
+SHOW STREAMS;
+```
+
+Shows the stream computing tasks. The two numbers in the first column, separated by a colon, form the stream-id: the connection-id of the connection that started the stream, and the stream's sequence number on that connection.
+
+```mysql
+KILL STREAM <stream-id>;
+```
+
+Force a stream computing task to stop, where stream-id is the connection-id:stream-no string displayed by SHOW STREAMS, such as 103:2; just copy and paste it.
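+
+As a hypothetical end-to-end sketch (the id shown is illustrative; use the value actually displayed by the SHOW command):
+
+```mysql
+SHOW QUERIES;
+KILL QUERY 105:2;
+```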
+
+## System Monitoring
+
+After TDengine starts, it automatically creates a monitoring database, log, and regularly writes the server's CPU, memory, hard disk space, bandwidth, number of requests, disk read/write speed, slow queries and other information into that database. TDengine also records important system operations (such as logging in, creating or deleting databases, etc.) and various error and alarm information in the log database. The system administrator can view this database directly from the CLI or view the monitoring information through a web GUI.
+
+The collection of these monitoring metrics is turned on by default, but you can modify option enableMonitor in the configuration file to turn it off or on.
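+
+Since log is an ordinary database, the collected metrics can be inspected from the shell like any other data. For example:
+
+```mysql
+USE log;
+SHOW TABLES;
+```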
+
+## File Directory Structure
+
+After installing TDengine, the following directories or files are generated in the operating system by default:
+
+
+
+| **Directory/File** | **Description** |
+| ------------------------- | ------------------------------------------------------------ |
+| /usr/local/taos/bin | TDengine's executable directory. The executables are linked to the /usr/bin directory via soft links. |
+| /usr/local/taos/connector | TDengine’s various connector directories. |
+| /usr/local/taos/driver | TDengine’s dynamic link library directory. Connect to /usr/lib directory via soft links. |
+| /usr/local/taos/examples | TDengine’s application example directory for various languages. |
+| /usr/local/taos/include | TDengine’s header files of C interface for externally serving. |
+| /etc/taos/taos.cfg | TDengine’s default [configuration files]. |
+| /var/lib/taos | TDengine's default data file directory; the location can be modified via the configuration file. |
+| /var/log/taos | TDengine's default log file directory; the location can be modified via the configuration file. |
+
+**Executables**
+
+All executables of TDengine are stored in the directory /usr/local/taos/bin by default. Including:
+
+- *taosd*: TDengine server-side executable
+- *taos*: TDengine Shell executable
+- *taosdump*: A data import/export tool
+- remove.sh: the script to uninstall TDengine; execute it with caution. It is linked to the rmtaos command in the /usr/bin directory. It removes the TDengine installation directory /usr/local/taos, but /etc/taos, /var/lib/taos and /var/log/taos remain.
+
+You can configure different data directories and log directories by modifying system configuration file taos.cfg.
+
+## TDengine Parameter Limits and Reserved Keywords
+
+- Database name: cannot contain "." and other special characters, and cannot exceed 32 characters
+- Table name: cannot contain "." and other special characters, and cannot exceed 192 characters together with the database name to which it belongs
+- Table column name: cannot contain special characters, and cannot exceed 64 characters
+- Database name, table name, column name cannot begin with a number
+- Number of columns in table: cannot exceed 1024 columns
+- Maximum length of a record: including the 8-byte timestamp, no more than 16KB (each column of BINARY/NCHAR type takes an additional 2 bytes of storage space)
+- Default maximum string length for a single SQL statement: 65480 bytes
+- Number of database replicas: no more than 3
+- User name: no more than 23 bytes
+- User password: no more than 15 bytes
+- Number of Tags: no more than 128
+- Total length of tags: cannot exceed 16KB
+- Number of records: limited by storage space only
+- Number of tables: limited only by the number of nodes
+- Number of databases: limited only by the number of nodes
+- Number of virtual nodes on a single database: cannot exceed 64
+
+At the moment, TDengine has nearly 200 internal reserved keywords, which cannot be used as database name, table name, STable name, data column name or tag column name regardless of case. The list of these keywords is as follows:
+
+| **List of Keywords** | | | | |
+| -------------------- | ----------- | ------------ | ---------- | --------- |
+| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
+| ABORT | COPY | ID | MODULES | SLIMIT |
+| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
+| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
+| ADD | CTIME | IMMEDIATE | NONE | STABLE |
+| AFTER | DATABASE | IMPORT | NOT | STABLES |
+| ALL | DATABASES | IN | NOTNULL | STAR |
+| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
+| AND | DEFERRED | INSERT | OF | STDDEV |
+| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
+| ASC | DESC | INTEGER | OR | STREAMS |
+| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
+| AVG | DETACH | INTO | PASS | SUM |
+| BEFORE | DIFF | IP | PERCENTILE | TABLE |
+| BEGIN | DISTINCT | IS | PLUS | TABLES |
+| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
+| BIGINT | DNODE | JOIN | PREV | TAGS |
+| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
+| BITAND | DOT | KEY | QUERIES | TBNAME |
+| BITNOT | DOUBLE | KILL | QUERY | TIMES |
+| BITOR | DROP | LAST | RAISE | TIMESTAMP |
+| BOOL | EACH | LE | REM | TINYINT |
+| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
+| BY | EQ | LIKE | REPLICA | TRIGGER |
+| CACHE | EXISTS | LIMIT | RESET | UMINUS |
+| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
+| CHANGE | FAIL | LOCAL | ROW | USE |
+| CLOG | FILL | LP | ROWS | USER |
+| CLUSTER | FIRST | LSHIFT | RP | USERS |
+| COLON | FLOAT | LT | RSHIFT | USING |
+| COLUMN | FOR | MATCH | SCORES | VALUES |
+| COMMA | FROM | MAX | SELECT | VARIABLE |
+| COMP | GE | METRIC | SEMI | VGROUPS |
+| CONCAT | GLOB | METRICS | SET | VIEW |
+| CONFIGS | GRANTS | MIN | SHOW | WAVG |
+| CONFLICT | GROUP | MINUS | SLASH | WHERE |
+| CONNECTION | | | | |
diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f344b45294eb629f2fd5c12be88707b3d3930df
--- /dev/null
+++ b/documentation20/en/12.taos-sql/docs.md
@@ -0,0 +1,1246 @@
+# TAOS SQL
+
+TDengine provides a SQL-style language, TAOS SQL, to insert and query data and to perform other common operations. To read this document, you should have some understanding of SQL.
+
+TAOS SQL is the main tool for users to write and query data in TDengine. TAOS SQL provides a style and mode similar to standard SQL to help users get started quickly. Strictly speaking, TAOS SQL is not, and does not attempt to provide, standard SQL syntax. In addition, since TDengine does not provide a deletion function for time-series data, there is no data deletion functionality in TAOS SQL.
+
+Let’s take a look at the conventions used for syntax descriptions.
+
+- The content in < > is what the user needs to enter, but do not enter < > itself
+- [ ] indicates optional content, but do not enter [ ] itself
+- "|" means selecting one of several choices, but you cannot enter | itself
+- "…" means the preceding item can be repeated
+
+In order to better explain the rules and characteristics of the SQL syntax, this document assumes a sample data set. Taking smart meters as an example, each smart meter collects three metrics: current, voltage, and phase. It is modeled as follows:
+
+```mysql
+taos> DESCRIBE meters;
+ Field | Type | Length | Note |
+=================================================================================
+ ts | TIMESTAMP | 8 | |
+ current | FLOAT | 4 | |
+ voltage | INT | 4 | |
+ phase | FLOAT | 4 | |
+ location | BINARY | 64 | TAG |
+ groupid | INT | 4 | TAG |
+```
+
+The data set contains data from four smart meters, which correspond to four sub-tables according to the modeling rules of TDengine, and their names are D1001, D1002, D1003 and D1004 respectively.
+
+## Data Types
+
+In TDengine, the most important element is the timestamp. You need to specify a timestamp when creating and inserting records and when querying historical records. Timestamps follow these rules:
+
+- Time format: 'YYYY-MM-DD HH:mm:ss.MS', default in milliseconds. For example: '2017-08-12 18:52:58.128'
+- Internal function **now**: the current time of the server
+- When inserting a record, if the timestamp is NOW, the server's current time is used.
+- Epoch time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000.
+- Arithmetic operations can be applied to timestamps. For example, now-2h represents a timestamp 2 hours before the current server time. Units include u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries the data of the whole week two weeks ago. To specify the interval for down-sampling, n (calendar month) and y (calendar year) can also be used as time units.
+
+The default time precision of TDengine is millisecond; it can be changed to microsecond by setting the parameter enableMicrosecond.
+
+In TDengine, the following 10 data types can be used in data model of an ordinary table.
+
+| | **Data Type** | **Bytes** | **Note** |
+| ---- | ------------- | --------- | ------------------------------------------------------------ |
+| 1    | TIMESTAMP     | 8         | Timestamp. Default in milliseconds, with microseconds supported. Starting from 1970-01-01 00:00:00.000 (UTC/GMT); timestamps cannot be earlier than this time. |
+| 2 | INT | 4 | A nullable integer type with a range of [-2^31+1, 2^31-1 ] |
+| 3 | BIGINT | 8 | A nullable integer type with a range of [-2^59, 2^59 ] |
+| 4    | FLOAT         | 4         | A standard nullable float type with 6-7 significant digits and a range of [-3.4E38, 3.4E38] |
+| 5 | DOUBLE | 8 | A standard nullable double float type with 15-16 significant digits and a range of [-1.7E308, 1.7E308] |
+| 6    | BINARY        | Custom    | Used to record ASCII strings. Theoretically, the maximum length can be 16,374 bytes, but since each row of data can be at most 16K bytes, the actual limit is generally smaller than the theoretical value. BINARY only supports string input, and the string must be enclosed in single quotation marks; otherwise all English characters are automatically converted to lowercase. A size must be specified when the type is used: for example, BINARY(20) defines a string with a maximum length of 20 characters, each occupying 1 byte of storage space. If the user string exceeds 20 bytes, an error is reported. A single quotation mark inside a string can be represented by the escape character backslash plus single quotation mark, i.e. `\'`. |
+| 7 | SMALLINT | 2 | A nullable integer type with a range of [-32767, 32767] |
+| 8 | TINYINT | 1 | A nullable integer type with a range of [-127, 127] |
+| 9    | BOOL          | 1         | Boolean type, {true, false} |
+| 10   | NCHAR         | Custom    | Used to record non-ASCII strings, such as Chinese characters. Each NCHAR character occupies 4 bytes of storage space. Single quotation marks are used at both ends of the string, and a single quotation mark inside the string must be escaped, i.e. `\'`. A size must be specified when NCHAR is used: a column of type NCHAR(10) stores up to 10 NCHAR characters, occupying 40 bytes of space. If the user string exceeds the declared length, an error is reported. |
+
+
+
+**Tips**:
+
+1. TDengine is case-insensitive to English characters in SQL statements and automatically converts them to lowercase for execution. Therefore, case-sensitive strings and passwords need to be enclosed in single quotation marks.
+2. Avoid using BINARY type to save non-ASCII type strings, which will easily lead to errors such as garbled data. The correct way is to use NCHAR type to save Chinese characters.
+
+## Database Management
+
+- **Create a Database**
+
+ ```mysql
+ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
+ ```
+
+Note:
+
+1. KEEP is how long the data of the database is kept; the default is 3650 days (10 years), and expired data is deleted automatically;
+2. UPDATE 1 marks that the database supports updating data with the same timestamp;
+3. Maximum length of the database name is 33;
+4. Maximum length of a SQL statement is 65480 characters;
+5. Database has more storage-related configuration parameters, see System Management.
+
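+For example, the following sketch combines the options shown in the syntax above (the database name `power` is borrowed from the sample data set used later in this document):
+
+```mysql
+CREATE DATABASE IF NOT EXISTS power KEEP 365 DAYS 10 UPDATE 1;
+```
+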
+- **Show current system parameters**
+
+ ```mysql
+ SHOW VARIABLES;
+ ```
+
+- **Use a database**
+
+ ```mysql
+ USE db_name;
+ ```
+ Use/switch database
+
+- **Drop a database**
+ ```mysql
+ DROP DATABASE [IF EXISTS] db_name;
+ ```
+  Delete a database; all tables it contains are deleted as well. Use with caution.
+
+- **Modify database parameters**
+
+ ```mysql
+ ALTER DATABASE db_name COMP 2;
+ ```
+  The COMP parameter modifies the database file compression flag. The default value is 2 and the value range is [0, 2]: 0 means no compression, 1 means one-stage compression, and 2 means two-stage compression.
+
+ ```mysql
+ ALTER DATABASE db_name REPLICA 2;
+ ```
+  The REPLICA parameter sets the number of replicas of the database; the value range is [1, 3]. In a cluster, the number of replicas must be less than or equal to the number of dnodes.
+
+ ```mysql
+ ALTER DATABASE db_name KEEP 365;
+ ```
+  The KEEP parameter sets the number of days to keep data files. The default value is 3650, and the value range is [days, 365000]; it must be greater than or equal to the days parameter value.
+
+ ```mysql
+ ALTER DATABASE db_name QUORUM 2;
+ ```
+  The QUORUM parameter sets the number of confirmations required for a successful data write; the value range is [1, 3]. For asynchronous replication, quorum is set to 1, and the virtual node with the master role can confirm by itself. For synchronous replication, it needs to be at least 2. In principle, Quorum >= 1 and Quorum <= replica number, which is required when starting a synchronization module instance.
+
+ ```mysql
+ ALTER DATABASE db_name BLOCKS 100;
+ ```
+  The BLOCKS parameter is the number of cache-sized memory blocks in each VNODE (TSDB), so the memory used by a VNODE is roughly (cache * blocks). The value range is [3, 1000].
+
+ ```mysql
+ ALTER DATABASE db_name CACHELAST 0;
+ ```
+  The CACHELAST parameter controls whether the last_row of each sub-table is cached in memory. The default value is 0, and the value range is [0, 1], where 0 means disabled and 1 means enabled. (Supported from version 2.0.11.)
+
+  **Tips**: After any of the above parameters are modified, `SHOW DATABASES` can be used to confirm whether the modification succeeded.
+
+- **Show all databases in system**
+
+ ```mysql
+ SHOW DATABASES;
+ ```
+
+## Table Management
+
+- **Create a table**
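+
+  A minimal sketch of the general form, following the pattern of the other entries in this section (field names here are placeholders):
+
+  ```mysql
+  CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
+  ```
+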
+Note:
+
+1. The first field must be a timestamp, and the system will set it as the primary key;
+2. The maximum length of a table name is 192;
+3. The length of each row of the table cannot exceed 16K characters;
+4. Sub-table names can only consist of letters, digits, and underscores, and cannot begin with a digit;
+5. If the data type BINARY or NCHAR is used, the maximum number of bytes must be specified, such as BINARY(20), which means 20 bytes;
+
+- **Create a table via STable**
+
+ ```mysql
+ CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
+ ```
+ Use a STable as template and assign tag values to create a data table.
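+
+  For example, using the sample meters STable and tag values that appear later in this document:
+
+  ```mysql
+  CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ('Beijing.Chaoyang', 2);
+  ```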
+
+- **Create a data table using STable as a template and specify a specific tags column**
+
+ ```mysql
+ CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
+ ```
+  Using the specified STable as a template, specify the values of some of the tags columns to create a data table. (Unspecified tags columns are set to NULL.)
+  Note: This method has been supported since version 2.0.17. In previous versions, specifying a subset of tags columns was not allowed; the values of all tags columns had to be given explicitly.
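+
+  For example, a sketch on the sample data set (the unspecified location tag is set to NULL):
+
+  ```mysql
+  CREATE TABLE IF NOT EXISTS d1002 USING meters (groupId) TAGS (2);
+  ```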
+
+- **Create tables in batches**
+
+ ```mysql
+ CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ ```
+  Create a large number of data tables in batches more quickly. (Server side 2.0.14 and above.)
+
+ Note:
+  1. The method of creating tables in batches requires that the data tables use a STable as a template.
+  2. Provided the length limit of SQL statements is not exceeded, it is suggested to keep the number of tables in a single statement between 1000 and 3000 to achieve an ideal table-creation speed.
+
+- **Drop a table**
+
+ ```mysql
+ DROP TABLE [IF EXISTS] tb_name;
+ ```
+
+- **Show all data table information under the current database**
+
+ ```mysql
+  SHOW TABLES [LIKE tb_name_wildcard];
+ ```
+ Show all data table information under the current database.
+  Note: Wildcard characters can be used to match names in LIKE. The maximum length of the wildcard string cannot exceed 24 bytes.
+  Wildcard matching: 1) '%' (percent sign) matches 0 to any number of characters; 2) '_' (underscore) matches a single character.
+
+- **Modify display character width online**
+
+ ```mysql
+  SET MAX_BINARY_DISPLAY_WIDTH <nn>;
+ ```
+
+- **Get schema information of a table**
+
+ ```mysql
+ DESCRIBE tb_name;
+ ```
+
+- **Add a column to table**
+
+ ```mysql
+ ALTER TABLE tb_name ADD COLUMN field_name data_type;
+ ```
+ Note:
+ 1. The maximum number of columns is 1024 and the minimum number is 2;
+ 2. The maximum length of a column name is 64;
+
+- **Drop a column in table**
+
+ ```mysql
+ ALTER TABLE tb_name DROP COLUMN field_name;
+ ```
+  If a table is created through a STable, schema changes can only be carried out on the STable, and the changes take effect for all tables created through that schema. For tables not created through a STable, you can modify the table schema directly.
+
+## STable Management
+
+Note: In version 2.0.15.0 and later, the STABLE reserved word is supported. That is, for the CREATE, DROP, and ALTER commands described later in this section, older versions require the reserved word TABLE where newer versions write STABLE.
+
+- **Create a STable**
+
+ ```mysql
+ CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
+ ```
+  Similar to standard table creation SQL, but you also need to specify the names and types of the TAGS fields.
+
+ Note:
+
+  1. The data type of a TAGS column cannot be timestamp;
+  2. TAGS column names cannot be duplicated;
+  3. A reserved word cannot be used as a TAGS column name;
+  4. The maximum number of TAGS is 128, with at least 1 TAG allowed and a total length of no more than 16K characters.
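+
+  For example, the meters STable used throughout this document could be created as follows (using the STABLE keyword of version 2.0.15.0 and later):
+
+  ```mysql
+  CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+  ```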
+
+- **Drop a STable**
+
+ ```mysql
+ DROP STABLE [IF EXISTS] stb_name;
+ ```
+  Dropping a STable automatically deletes all sub-tables created through it.
+
+- **Show all STable information under the current database**
+
+ ```mysql
+ SHOW STABLES [LIKE tb_name_wildcard];
+ ```
+  View all STables under the current database and their relevant information, including name, creation time, number of columns, number of tags, and number of tables created through the STable.
+
+- **Obtain schema information of a STable**
+
+ ```mysql
+ DESCRIBE stb_name;
+ ```
+
+- **Add column to STable**
+
+ ```mysql
+ ALTER STABLE stb_name ADD COLUMN field_name data_type;
+ ```
+
+- **Drop column in STable**
+
+ ```mysql
+ ALTER STABLE stb_name DROP COLUMN field_name;
+ ```
+
+## TAG Management in STable
+
+- **Add a tag**
+
+ ```mysql
+ ALTER STABLE stb_name ADD TAG new_tag_name tag_type;
+ ```
+  Add a new tag to the STable and specify its type. The total number of tags cannot exceed 128, and their total length cannot exceed 16K characters.
+
+- **Drop a tag**
+
+ ```mysql
+ ALTER STABLE stb_name DROP TAG tag_name;
+ ```
+  Delete a tag of a STable. After the tag is deleted, all sub-tables under the STable automatically delete that tag as well.
+
+- **Modify a tag name**
+
+ ```mysql
+ ALTER STABLE stb_name CHANGE TAG old_tag_name new_tag_name;
+ ```
+  Modify a tag name of a STable. After the modification, all sub-tables under the STable automatically use the new tag name.
+
+- **Modify a tag value of sub-table**
+
+ ```mysql
+ ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
+ ```
+  Note: Except for the tag value update, which is carried out on sub-tables, all other tag operations (adding tags, deleting tags, etc.) can only be applied to a STable and cannot be performed on a single sub-table. After a tag is added to a STable, all tables created based on it automatically gain the new tag, with a default value of NULL.
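+
+  For example, a sketch on the sample data set:
+
+  ```mysql
+  ALTER TABLE d1001 SET TAG location='Beijing.Haidian';
+  ```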
+
+## Data Writing
+
+- **Insert a record**
+
+ ```mysql
+ INSERT INTO tb_name VALUES (field_value, ...);
+ ```
+ Insert a record into table tb_name.
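+
+  For example, a sketch using the sample sub-table d1001 (columns ts, current, voltage, phase):
+
+  ```mysql
+  INSERT INTO d1001 VALUES (NOW, 10.2, 220, 0.23);
+  ```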
+
+- **Insert a record with data corresponding to a given column**
+
+ ```mysql
+ INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...);
+ ```
+  Insert a record into table tb_name, with the data corresponding to the given columns. For columns that do not appear in the SQL statement, the database automatically populates them with NULL. The primary key (timestamp) cannot be NULL.
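+
+  For example, a sketch on d1001 that omits the voltage column, which is then filled with NULL:
+
+  ```mysql
+  INSERT INTO d1001 (ts, current, phase) VALUES (NOW, 10.2, 0.23);
+  ```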
+
+- **Insert multiple records**
+
+ ```mysql
+ INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ ```
+ Insert multiple records into table tb_name.
+
+- **Insert multiple records into a given column**
+
+ ```mysql
+ INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ ```
+ Insert multiple records into a given column of table tb_name.
+
+- **Insert multiple records into multiple tables**
+
+ ```mysql
+ INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
+ tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ ```
+ Insert multiple records into tables tb1_name and tb2_name at the same time.
+
+- **Insert multiple records per column into multiple tables**
+
+ ```mysql
+ INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
+ tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
+ ```
+ Insert multiple records per column into tables tb1_name and tb2_name at the same time.
+  Note: The oldest record allowed to be inserted has a timestamp equal to the current server time minus the configured keep value (days of data retention), and the newest record allowed has a timestamp equal to the current server time plus the configured days value (the time span covered by one data file, in days). Both keep and days can be specified when the database is created; the default values are 3650 days and 10 days, respectively.
+
+- **Automatically create a table when inserting**
+
+ ```mysql
+ INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ ```
+  If the user is not sure whether a table exists when writing data, the automatic table-creation syntax can be used to create a non-existent table during the write; if the table already exists, no new table is created. Automatic table creation requires using a STable as a template and specifying tags values for the data table.
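+
+  For example, combining the sample meters STable and sub-table d1001:
+
+  ```mysql
+  INSERT INTO d1001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES (NOW, 10.2, 220, 0.23);
+  ```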
+
+- **Automatically create a table when inserting, and specify a given tags column**
+
+ ```mysql
+ INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
+ ```
+  During automatic table creation, the values of only some of the tags columns can be specified; the unspecified tags columns are set to NULL.
+
+**History writing**: The IMPORT or INSERT command can be used. The syntax and function of IMPORT are exactly the same as those of INSERT.
+
+Note: For INSERT-type SQL statements, the stream parsing strategy we adopt still executes the part of the SQL that precedes an error. In the following SQL, the INSERT statement is invalid, but d1001 is still created.
+
+```mysql
+taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
+Query OK, 0 row(s) affected (0.008245s)
+
+taos> SHOW STABLES;
+ name | created_time | columns | tags | tables |
+============================================================================================
+ meters | 2020-08-06 17:50:27.831 | 4 | 2 | 0 |
+Query OK, 1 row(s) in set (0.001029s)
+
+taos> SHOW TABLES;
+Query OK, 0 row(s) in set (0.000946s)
+
+taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
+
+DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)
+
+taos> SHOW TABLES;
+ table_name | created_time | columns | stable_name |
+======================================================================================================
+ d1001 | 2020-08-06 17:52:02.097 | 4 | meters |
+Query OK, 1 row(s) in set (0.001091s)
+```
+
+## Data Query
+
+### Query Syntax:
+
+```mysql
+SELECT select_expr [, select_expr ...]
+ FROM {tb_name_list}
+ [WHERE where_condition]
+ [INTERVAL (interval_val [, interval_offset])]
+ [SLIDING sliding_val]
+ [FILL fill_val]
+ [GROUP BY col_list]
+ [ORDER BY col_list { DESC | ASC }]
+ [SLIMIT limit_val [SOFFSET offset_val]]
+ [LIMIT limit_val [OFFSET offset_val]]
+ [>> export_file];
+```
+
+#### SELECT Clause
+
+A SELECT clause can be part of a UNION query or a subquery of another query.
+
+#### Wildcard character
+
+The wildcard \* can be used to refer to all columns. For ordinary tables, the results contain only ordinary columns.
+
+```mysql
+taos> SELECT * FROM d1001;
+ ts | current | voltage | phase |
+======================================================================================
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
+Query OK, 3 row(s) in set (0.001165s)
+```
+
+For STables, wildcards include *tag columns*.
+
+```mysql
+taos> SELECT * FROM meters;
+ ts | current | voltage | phase | location | groupid |
+=====================================================================================================================================
+ 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
+ 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
+ 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
+ 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
+ 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
+ 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
+Query OK, 9 row(s) in set (0.002022s)
+```
+
+Wildcards support table name prefixes; the following two SQL statements both return all columns:
+
+```mysql
+SELECT * FROM d1001;
+SELECT d1001.* FROM d1001;
+```
+
+In a JOIN query, the results returned by \* with a prefix and \* without a prefix are different. \* returns all column data of all tables (excluding tags), while a wildcard with a prefix returns only the column data of the corresponding table.
+
+```mysql
+taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
+ ts | current | voltage | phase | ts | current | voltage | phase |
+==================================================================================================================================
+ 2018-10-03 14:38:05.000 | 10.30000| 219 | 0.31000 | 2018-10-03 14:38:05.000 | 10.80000| 223 | 0.29000 |
+Query OK, 1 row(s) in set (0.017385s)
+```
+```mysql
+taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
+ ts | current | voltage | phase |
+======================================================================================
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 |
+Query OK, 1 row(s) in set (0.020443s)
+```
+
+Some SQL functions support wildcard arguments. The difference is that the `count(*)` function returns only one column, while the `first`, `last`, and `last_row` functions return all columns.
+
+```mysql
+taos> SELECT COUNT(*) FROM d1001;
+ count(*) |
+========================
+ 3 |
+Query OK, 1 row(s) in set (0.001035s)
+```
+
+```mysql
+taos> SELECT FIRST(*) FROM d1001;
+ first(ts) | first(current) | first(voltage) | first(phase) |
+=========================================================================================
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 |
+Query OK, 1 row(s) in set (0.000849s)
+```
+
+#### Tag Column
+
+Since version 2.0.14, queries on ordinary tables can specify *tag columns*, and the values of the tag columns are returned together with the data of the other ordinary columns.
+
+```mysql
+taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
+ location | groupid | current |
+======================================================================
+ Beijing.Chaoyang | 2 | 10.30000 |
+ Beijing.Chaoyang | 2 | 12.60000 |
+Query OK, 2 row(s) in set (0.003112s)
+```
+
+Note: The wildcard \* of ordinary tables does not contain *tag columns*.
+
+#### Obtain the de-duplicated value of a tag column
+
+Since version 2.0.15, the `DISTINCT` keyword can be specified when querying tag columns of STables; it returns all non-duplicate values of the given tag columns.
+
+```mysql
+SELECT DISTINCT tag_name FROM stb_name;
+```
+
+Note: At present, `DISTINCT` keyword only supports deduplication of tag columns of STables, and cannot be used for ordinary columns.
+
+#### Column name in result set
+
+In a `SELECT` clause, if no alias is given for a column, the result set column name defaults to the expression in the `SELECT` clause. In addition, the user can use `AS` to rename columns in the returned result set. For example:
+
+```mysql
+taos> SELECT ts, ts AS primary_key_ts FROM d1001;
+ ts | primary_key_ts |
+====================================================
+ 2018-10-03 14:38:05.000 | 2018-10-03 14:38:05.000 |
+ 2018-10-03 14:38:15.000 | 2018-10-03 14:38:15.000 |
+ 2018-10-03 14:38:16.800 | 2018-10-03 14:38:16.800 |
+Query OK, 3 row(s) in set (0.001191s)
+```
+
+However, renaming a single column is not supported for `first(*)`, `last(*)`, and `last_row(*)`.
+
+#### Implicit result column
+
+`Select_exprs` can be the name of a column belonging to a table, or a column-based functional expression or calculation formula, with an upper limit of 256. When the user uses an `interval` or `group by tags` clause, the timestamp column (the first column) and the tag columns in the `group by` clause are forced into the final returned result. Later versions may support turning off the output of implicit columns in the `group by` clause, with the column output controlled entirely by the `SELECT` clause.
+
+#### Table (STable) List
+
+The `FROM` keyword can be followed by a list of several tables (STables) or by the result of a subquery.
+
+If the table is not in the user's current database, the database name can be placed before the table name to specify the database to which the table belongs. For example, `power.d1001` can be used to access a table across databases.
+
+```mysql
+SELECT * FROM power.d1001;
+------------------------------
+USE power;
+SELECT * FROM d1001;
+```
+
+#### Special Functions
+
+Some special query functions can be executed without a FROM clause. Obtain the current database with DATABASE():
+
+```mysql
+taos> SELECT DATABASE();
+ database() |
+=================================
+ power |
+Query OK, 1 row(s) in set (0.000079s)
+```
+
+If no default database was specified at login and the `USE` command has not been used to switch databases, `NULL` is returned.
+
+```mysql
+taos> SELECT DATABASE();
+ database() |
+=================================
+ NULL |
+Query OK, 1 row(s) in set (0.000184s)
+```
+
+Get server and client version numbers:
+
+```mysql
+taos> SELECT CLIENT_VERSION();
+ client_version() |
+===================
+ 2.0.0.0 |
+Query OK, 1 row(s) in set (0.000070s)
+
+taos> SELECT SERVER_VERSION();
+ server_version() |
+===================
+ 2.0.0.0 |
+Query OK, 1 row(s) in set (0.000077s)
+```
+
+A server status detection statement. If the server is normal, it returns a number (for example, 1); if the server is abnormal, it returns an error code. This SQL syntax is compatible with connection pools checking the status of TDengine and with third-party tools checking the status of the database server, and it prevents connection pools from losing connections due to an incorrect heartbeat-detection SQL statement.
+
+```mysql
+taos> SELECT SERVER_STATUS();
+ server_status() |
+==================
+ 1 |
+Query OK, 1 row(s) in set (0.000074s)
+
+taos> SELECT SERVER_STATUS() AS status;
+ status |
+==============
+ 1 |
+Query OK, 1 row(s) in set (0.000081s)
+```
+
+#### Special keywords in TAOS SQL
+
+> `TBNAME`: can be regarded as a special tag in a STable query, representing the name of the sub-table involved in the query
+>
+> `_c0`: represents the first column of a table (STable)
+
+#### Tips
+
+Get all sub-table names and related tags information of a STable:
+
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+Count the number of sub-tables under a STable:
+
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+Both queries above only support filter conditions on TAGS in the WHERE clause. For example:
+
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location |
+==================================================================
+ d1004 | Beijing.Haidian |
+ d1003 | Beijing.Haidian |
+ d1002 | Beijing.Chaoyang |
+ d1001 | Beijing.Chaoyang |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+
+- You can use \* to return all columns, or specify column names. The four arithmetic operations can be performed on numeric columns, and output columns can be renamed.
+- The `WHERE` clause can use various logical conditions to filter numeric values, or wildcards to filter strings.
+- The output is sorted by default in ascending order of the timestamp in the first column, but descending order can be specified (\_c0 refers to the first column, the timestamp). Using ORDER BY on other fields is illegal.
+- The parameter LIMIT controls the number of output rows, and OFFSET specifies from which row the output starts. LIMIT/OFFSET is applied to the result set after ORDER BY.
+- ">>" exports the output to a specified file.
+
+#### Supported Filtering Operations
+
+| **Operation** | **Note** | **Applicable Data Types** |
+| ------------- | ----------------------------- | ----------------------------------- |
+| > | larger than | **timestamp** and all numeric types |
+| < | smaller than | **timestamp** and all numeric types |
+| >= | larger than or equal to | **timestamp** and all numeric types |
+| <= | smaller than or equal to | **timestamp** and all numeric types |
+| = | equal to | all types |
+| <> | not equal to | all types |
+| between and | within a certain range | **timestamp** and all numeric types |
+| % | match with any char sequences | **binary** **nchar** |
+| _ | match with a single char | **binary** **nchar** |
+
+1. To filter on the ranges of multiple fields at the same time, use the keyword AND to connect the query conditions. Query filters between different columns connected by OR are not supported at the moment.
+2. For a single field, if it is a time filter condition, only one condition can be set in a statement; for other (ordinary) columns or tag columns, the OR keyword can be used for combined-condition filtering. For example: ((value > 20 AND value < 30) OR (value < 12)).
+3. Since version 2.0.17, condition filtering supports the BETWEEN AND syntax. For example, WHERE col2 BETWEEN 1.5 AND 3.25 means the query condition "1.5 ≤ col2 ≤ 3.25".
+
+### SQL Example
+
+- For example, table tb1 is created with the following statement
+
+ ```mysql
+ CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
+ ```
+
+- Query all records of the last hour of tb1
+
+ ```mysql
+ SELECT * FROM tb1 WHERE ts >= NOW - 1h;
+ ```
+
+- Query table tb1 from 2018-06-01 08:00:00.000 to 2018-06-02 08:00:00.000, where the col3 string ends in 'nny', with the result in descending order of timestamp:
+
+ ```mysql
+ SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC;
+ ```
+
+- Query the sum of col1 and col2, named complex, where the time is greater than 2018-06-01 08:00:00.000 and col2 is greater than 1.2; output only 10 records, starting from the fifth:
+
+ ```mysql
+ SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
+ ```
+
+- Count the records of the past 10 minutes where the value of col2 is greater than 3.14, and output the result to the file /home/testoutpu.csv:
+
+ ```mysql
+ SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
+ ```
+
+
+
+## SQL Functions
+
+TDengine supports aggregations over data; they are listed below:
+
+- **COUNT**
+
+ ```mysql
+ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
+ ```
+  Function: count the number of rows or non-NULL values in a column of a table/STable.
+
+ Returned result data type: long integer INT64.
+
+  Applicable Fields: all fields.
+
+ Applied to: **table, STable**.
+
+ Note:
+  1. You can use \* instead of a specific field; COUNT(\*) returns the total number of records.
+  2. For the same table, the query results for fields without NULL values are the same.
+  3. If the statistic object is a specific column, the number of records with non-NULL values in that column is returned.
+
+ Example:
+
+ ```mysql
+ taos> SELECT COUNT(*), COUNT(voltage) FROM meters;
+ count(*) | count(voltage) |
+ ================================================
+ 9 | 9 |
+ Query OK, 1 row(s) in set (0.004475s)
+
+ taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
+ count(*) | count(voltage) |
+ ================================================
+ 3 | 3 |
+ Query OK, 1 row(s) in set (0.001075s)
+ ```
+
+- **AVG**
+
+ ```mysql
+ SELECT AVG(field_name) FROM tb_name [WHERE clause];
+ ```
+  Function: return the average value of a column in a table/STable.
+
+ Return Data Type: double.
+
+ Applicable Fields: all types except timestamp, binary, nchar, bool.
+
+ Applied to: **table,STable**.
+
+ Example:
+
+ ```mysql
+ taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM meters;
+ avg(current) | avg(voltage) | avg(phase) |
+ ====================================================================================
+ 11.466666751 | 220.444444444 | 0.293333333 |
+ Query OK, 1 row(s) in set (0.004135s)
+
+ taos> SELECT AVG(current), AVG(voltage), AVG(phase) FROM d1001;
+ avg(current) | avg(voltage) | avg(phase) |
+ ====================================================================================
+ 11.733333588 | 219.333333333 | 0.316666673 |
+ Query OK, 1 row(s) in set (0.000943s)
+ ```
+
+- **TWA**
+
+ ```mysql
+ SELECT TWA(field_name) FROM tb_name WHERE clause;
+ ```
+
+  Function: time-weighted average function. Returns the time-weighted average of a column in a table over a period of time.
+
+ Return Data Type: double.
+
+ Applicable Fields: all types except timestamp, binary, nchar, bool.
+
+ Applied to: **table**.
+
+- **SUM**
+
+ ```mysql
+ SELECT SUM(field_name) FROM tb_name [WHERE clause];
+ ```
+
+  Function: return the sum of a column in a table/STable.
+
+  Return Data Type: long integer INT64 or Double.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Applied to: **table,STable**.
+
+ Example:
+
+ ```mysql
+ taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM meters;
+ sum(current) | sum(voltage) | sum(phase) |
+ ================================================================================
+ 103.200000763 | 1984 | 2.640000001 |
+ Query OK, 1 row(s) in set (0.001702s)
+
+ taos> SELECT SUM(current), SUM(voltage), SUM(phase) FROM d1001;
+ sum(current) | sum(voltage) | sum(phase) |
+ ================================================================================
+ 35.200000763 | 658 | 0.950000018 |
+ Query OK, 1 row(s) in set (0.000980s)
+ ```
+
+- **STDDEV**
+
+ ```mysql
+ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
+ ```
+
+  Function: mean square deviation of a column in a table.
+
+ Return Data Type: Double.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+  Applied to: **table**. (Also supports **STable** since version 2.0.15.1.)
+
+ Example:
+
+ ```mysql
+ taos> SELECT STDDEV(current) FROM d1001;
+ stddev(current) |
+ ============================
+ 1.020892909 |
+ Query OK, 1 row(s) in set (0.000915s)
+ ```
+
+- **LEASTSQUARES**
+ ```mysql
+ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+ ```
+  Function: fit the values of a column in a table to a straight-line equation of the primary key (timestamp). start_val is the initial value of the independent variable, and step_val is the step size of the independent variable.
+
+ Return Data Type: String expression (slope, intercept).
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+  Note: The independent variable is the timestamp, and the dependent variable is the value of the column.
+
+ Applied to: **table**.
+
+ Example:
+ ```mysql
+ taos> SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
+ leastsquares(current, 1, 1) |
+ =====================================================
+ {slop:1.000000, intercept:9.733334} |
+ Query OK, 1 row(s) in set (0.000921s)
+ ```
+
+### Selector Functions
+
+- **MIN**
+ ```mysql
+ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
+ ```
+  Function: return the minimum value of a specific column in a table/STable.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Example:
+
+ ```mysql
+ taos> SELECT MIN(current), MIN(voltage) FROM meters;
+ min(current) | min(voltage) |
+ ======================================
+ 10.20000 | 218 |
+ Query OK, 1 row(s) in set (0.001765s)
+
+ taos> SELECT MIN(current), MIN(voltage) FROM d1001;
+ min(current) | min(voltage) |
+ ======================================
+ 10.30000 | 218 |
+ Query OK, 1 row(s) in set (0.000950s)
+ ```
+
+- **MAX**
+
+ ```mysql
+ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+
+  Function: return the maximum value of a specific column in a table/STable.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Example:
+
+ ```mysql
+ taos> SELECT MAX(current), MAX(voltage) FROM meters;
+ max(current) | max(voltage) |
+ ======================================
+ 13.40000 | 223 |
+ Query OK, 1 row(s) in set (0.001123s)
+
+ taos> SELECT MAX(current), MAX(voltage) FROM d1001;
+ max(current) | max(voltage) |
+ ======================================
+ 12.60000 | 221 |
+ Query OK, 1 row(s) in set (0.000987s)
+ ```
+
+- **FIRST**
+
+ ```mysql
+ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+
+  Function: the first non-NULL value written to a column in a table/STable.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types.
+
+ Note:
+  1. To return the first (minimum timestamp) non-NULL value of each column, use FIRST(\*);
+  2. If all values of a column in the result set are NULL, the returned result of that column is also NULL;
+  3. If all columns in the result set are NULL values, no result is returned.
+
+ Example:
+
+ ```mysql
+ taos> SELECT FIRST(*) FROM meters;
+ first(ts) | first(current) | first(voltage) | first(phase) |
+ =========================================================================================
+ 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 |
+ Query OK, 1 row(s) in set (0.004767s)
+
+ taos> SELECT FIRST(current) FROM d1002;
+ first(current) |
+ =======================
+ 10.20000 |
+ Query OK, 1 row(s) in set (0.001023s)
+ ```
+
+- **LAST**
+
+ ```mysql
+ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+
+  Function: the last non-NULL value written to a column in a table/STable.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types.
+
+ Note:
+  1. To return the last (maximum timestamp) non-NULL value of each column, use LAST(\*);
+  2. If all values of a column in the result set are NULL, the returned result of that column is also NULL; if all columns in the result set are NULL, no result is returned.
+
+ Example:
+
+ ```mysql
+ taos> SELECT LAST(*) FROM meters;
+ last(ts) | last(current) | last(voltage) | last(phase) |
+ ========================================================================================
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
+ Query OK, 1 row(s) in set (0.001452s)
+
+ taos> SELECT LAST(current) FROM d1002;
+ last(current) |
+ =======================
+ 10.30000 |
+ Query OK, 1 row(s) in set (0.000843s)
+ ```
+
+- **TOP**
+
+ ```mysql
+ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+  Function: the largest k non-NULL values of a column in a table/STable. If multiple values tie at the k-th largest, the ones with smaller timestamps are returned.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Note:
+ 1. The range of *k* value is 1≤*k*≤100;
+ 2. System also returns the timestamp column associated with the record.
+
+ Example:
+
+ ```mysql
+ taos> SELECT TOP(current, 3) FROM meters;
+ ts | top(current, 3) |
+ =================================================
+ 2018-10-03 14:38:15.000 | 12.60000 |
+ 2018-10-03 14:38:16.600 | 13.40000 |
+ 2018-10-03 14:38:16.800 | 12.30000 |
+ Query OK, 3 row(s) in set (0.001548s)
+
+ taos> SELECT TOP(current, 2) FROM d1001;
+ ts | top(current, 2) |
+ =================================================
+ 2018-10-03 14:38:15.000 | 12.60000 |
+ 2018-10-03 14:38:16.800 | 12.30000 |
+ Query OK, 2 row(s) in set (0.000810s)
+ ```
+
+- **BOTTOM**
+
+ ```mysql
+ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+  Function: the smallest k non-NULL values of a column in a table/STable. If multiple values tie at the k-th smallest, the ones with smaller timestamps are returned.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Note:
+ 1. The range of *k* value is 1≤*k*≤100;
+ 2. System also returns the timestamp column associated with the record.
+
+ Example:
+
+ ```mysql
+ taos> SELECT BOTTOM(voltage, 2) FROM meters;
+ ts | bottom(voltage, 2) |
+ ===============================================
+ 2018-10-03 14:38:15.000 | 218 |
+ 2018-10-03 14:38:16.650 | 218 |
+ Query OK, 2 row(s) in set (0.001332s)
+
+ taos> SELECT BOTTOM(current, 2) FROM d1001;
+ ts | bottom(current, 2) |
+ =================================================
+ 2018-10-03 14:38:05.000 | 10.30000 |
+ 2018-10-03 14:38:16.800 | 12.30000 |
+ Query OK, 2 row(s) in set (0.000793s)
+ ```
+
+- **PERCENTILE**
+ ```mysql
+ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
+ ```
+  Function: percentile of the values of a column in a table.
+
+ Return Data Type: Double.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+  Note: The range of the P value is 0 ≤ P ≤ 100. P equals MIN when it is 0, and equals MAX when it is 100.
+
+ Example:
+
+ ```mysql
+ taos> SELECT PERCENTILE(current, 20) FROM d1001;
+ percentile(current, 20) |
+ ============================
+ 11.100000191 |
+ Query OK, 1 row(s) in set (0.000787s)
+ ```
+
+- **APERCENTILE**
+ ```mysql
+ SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+  Function: percentile of the values of a column in a table/STable; similar to the PERCENTILE function, but returns an approximate result.
+
+ Return Data Type: Double.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+  Note: The range of the *P* value is 0 ≤ *P* ≤ 100. *P* equals MIN when it is 0, and equals MAX when it is 100. The APERCENTILE function is recommended, as it performs far better than the PERCENTILE function.
+
+- **LAST_ROW**
+ ```mysql
+ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+ ```
+  Function: return the last record of a table (STable).
+
+  Return Data Type: same as applicable fields.
+
+ Applicable Fields: All types.
+
+  Note: Unlike the LAST function, LAST_ROW does not support time-range restriction and always returns the last record.
+
+ Example:
+
+ ```mysql
+ taos> SELECT LAST_ROW(current) FROM meters;
+ last_row(current) |
+ =======================
+ 12.30000 |
+ Query OK, 1 row(s) in set (0.001238s)
+
+ taos> SELECT LAST_ROW(current) FROM d1002;
+ last_row(current) |
+ =======================
+ 10.30000 |
+ Query OK, 1 row(s) in set (0.001042s)
+ ```
+
+### Computing Functions
+
+- **DIFF**
+ ```mysql
+ SELECT DIFF(field_name) FROM tb_name [WHERE clause];
+ ```
+  Function: return the difference between each value of a column and the previous value of the same column.
+
+ Return Data Type: Same as applicable fields.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+  Note: The number of output rows is the total number of rows in the range minus one; there is no output for the first row.
+
+ Example:
+
+ ```mysql
+ taos> SELECT DIFF(current) FROM d1001;
+ ts | diff(current) |
+ =================================================
+ 2018-10-03 14:38:15.000 | 2.30000 |
+ 2018-10-03 14:38:16.800 | -0.30000 |
+ Query OK, 2 row(s) in set (0.001162s)
+ ```
+
+- **SPREAD**
+
+ ```mysql
+ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ ```
+  Function: return the difference between the maximum and minimum values of a column in a table/STable.
+
+ Return Data Type: Double.
+
+ Applicable Fields: All types except binary, nchar, bool.
+
+  Note: It can also be applied to a TIMESTAMP field, in which case it indicates the time range covered by the records.
+
+ Example:
+
+ ```mysql
+ taos> SELECT SPREAD(voltage) FROM meters;
+ spread(voltage) |
+ ============================
+ 5.000000000 |
+ Query OK, 1 row(s) in set (0.001792s)
+
+ taos> SELECT SPREAD(voltage) FROM d1001;
+ spread(voltage) |
+ ============================
+ 3.000000000 |
+ Query OK, 1 row(s) in set (0.000836s)
+ ```
+
+- **Four Operations**
+
+ ```mysql
+ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
+ ```
+  Function: calculation results of addition, subtraction, multiplication, division, and remainder of values in one column or among multiple columns of a table/STable.
+
+ Returned Data Type: Double.
+
+ Applicable Fields: All types except timestamp, binary, nchar, bool.
+
+ Note:
+
+  1. Calculation between two or more columns is supported, and the calculation priority can be controlled with parentheses ();
+  2. NULL fields do not participate in the calculation. If a row involved in the calculation contains NULL, the calculation result of that row is NULL.
+
+## Time-dimension Aggregation
+
+TDengine supports aggregating by time intervals. Data in a table can be partitioned by intervals and aggregated to generate results. For example, a temperature sensor may collect data once per second while the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down-sampling operations, and the syntax is as follows:
+
+```mysql
+SELECT function_list FROM tb_name
+ [WHERE where_condition]
+ INTERVAL (interval [, offset])
+ [SLIDING sliding]
+ [FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+
+SELECT function_list FROM stb_name
+ [WHERE where_condition]
+ INTERVAL (interval [, offset])
+ [SLIDING sliding]
+ [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
+ [GROUP BY tags]
+```
+
+- The length of the aggregation interval is specified by the keyword INTERVAL; the minimum interval is 10 milliseconds (10a), and an offset is supported (the offset must be less than the interval). In aggregation queries, the aggregator and selector functions that can be executed simultaneously are limited to functions with a single output: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last. Functions with multiple rows of output (such as top, bottom, diff, and the four arithmetic operations) cannot be used.
+
+- The WHERE clause specifies the start and end times of the query and other filter conditions.
+
+- The FILL clause specifies the filling mode when data is missing in an interval. Applicable filling modes include the following:
+
+  1. No filling: NONE (the default filling mode).
+  2. VALUE filling: fixed-value filling, where the filled value must be specified. For example: FILL(VALUE, 1.23).
+  3. NULL filling: fill the data with NULL. For example: FILL(NULL).
+  4. PREV filling: fill the data with the previous non-NULL value. For example: FILL(PREV).
+  5. NEXT filling: fill the data with the next non-NULL value. For example: FILL(NEXT).
+
+Note:
+
+  1. When using the FILL clause, a large number of filled outputs may be generated. Be sure to specify the time range for the query. For each query, the system can return no more than 10 million results with interpolation.
+  2. In a time-dimension aggregation, the time series in the returned results increases strictly monotonically.
+  3. If the query object is a STable, the aggregator functions act on the data of all tables under the STable that satisfy the value filter conditions. If no GROUP BY clause is used in the query, the returned results increase strictly monotonically by time series; if a GROUP BY clause is used to group, the results within each group do not necessarily increase strictly monotonically by time series.
+
+Example: The table-creation statement for the smart meters is as follows:
+
+```mysql
+CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+```
+
+Based on the data collected by the smart meters, the average, maximum, and median current over the past 24 hours are calculated in 10-minute intervals, and the trend of the current over time is fitted to a straight line. If there is no calculated value, it is filled with the previous non-NULL value. The query statement is as follows:
+
+```mysql
+SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
+ WHERE ts>=NOW-1d
+ INTERVAL(10m)
+ FILL(PREV);
+```
+
+## TAOS SQL Boundary Restrictions
+
+- Max database name length is 32
+- Max length of a table name is 192, and max length of each data row is 16K characters
+- Max length of a column name is 64; the max number of columns allowed is 1024, and the min number allowed is 2. The first column must be a timestamp
+- Max number of tags allowed is 128, with a minimum of 1, and the total length of tags cannot exceed 16K characters
+- Max length of a SQL statement is 65480 characters, but it can be modified with the system configuration parameter maxSQLLength, up to a maximum of 1M
+- The numbers of databases, STables, and tables are not limited by the system, only by system resources
+
+## Other TAOS SQL Conventions
+
+**Restrictions on group by**
+
+TAOS SQL supports GROUP BY on tags, TBNAME, and ordinary columns, with the restriction that only one column can be grouped on and that column must have fewer than 100,000 unique values.
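+
+For example, a sketch on the sample data set, grouping by the location tag:
+
+```mysql
+SELECT AVG(voltage), COUNT(*) FROM meters GROUP BY location;
+```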
+
+**Restrictions on join operation**
+
+TAOS SQL supports joining two tables on their primary key (timestamp); it does not yet support the four arithmetic operations on aggregated results.
+
+**Availability of IS NOT NULL**
+
+IS NOT NULL supports columns of all types. The non-NULL expression <> "" applies only to columns of non-numeric types.
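+
+For example, sketches on the tb1 table defined earlier (col3 is BINARY(50)):
+
+```mysql
+SELECT * FROM tb1 WHERE col3 IS NOT NULL;
+SELECT * FROM tb1 WHERE col3 <> '';
+```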
diff --git a/documentation20/en/13.faq/docs.md b/documentation20/en/13.faq/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..05507e26e5ab84a01e19d9ecced5e0464c1411f3
--- /dev/null
+++ b/documentation20/en/13.faq/docs.md
@@ -0,0 +1,161 @@
+# FAQ
+
+Tutorials & FAQ
+
+## 0. How to report an issue?
+
+If the contents of this FAQ cannot help you and you need the technical support and assistance of the TDengine team, please package the contents of the following two directories:
+
+1. /var/log/taos (if the default path has not been modified)
+2. /etc/taos
+
+Provide the necessary description of the problem, including the TDengine version used, the platform environment information, the operation performed, the symptoms of the problem, and the approximate time, then submit an issue on [GitHub](https://github.com/taosdata/TDengine).
+
+To ensure there is enough debug information, if the problem can be reproduced, please modify the /etc/taos/taos.cfg file, add the line "debugFlag 135" at the end (without the quotation marks), restart taosd, reproduce the problem, and then submit. You can also temporarily set the log level of taosd with the following SQL statement.
+
+```
+ alter dnode debugFlag 135;
+```
+
+However, when the system is running normally, please set debugFlag back to 131; otherwise a large amount of log information will be generated and system efficiency will be reduced.
+
+## 1. What should I pay attention to when upgrading TDengine from an older version to 2.0 or above? ☆☆☆
+
+Version 2.0 is a complete refactoring of the previous version, and the configuration and data files are incompatible. Be sure to do the following before upgrading:
+
+1. Delete the configuration file: execute `sudo rm -rf /etc/taos/taos.cfg`
+2. Delete the log files: execute `sudo rm -rf /var/log/taos/`
+3. After making sure the data is no longer needed, delete the data files: execute `sudo rm -rf /var/lib/taos/`
+4. Install the latest stable version of TDengine
+5. If you need to migrate data or the data file is corrupted, please contact the official technical support team of TAOS Data to assist
+
+## 2. What can I do when I encounter the error "Unable to establish connection" on Windows?
+
+See the [technical blog](https://www.taosdata.com/blog/2019/12/03/jdbcdriver%E6%89%BE%E4%B8%8D%E5%88%B0%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%BA%93/) for this issue.
+
+## 3. Why do I get "more dnodes are needed" when creating a table?
+
+See the [technical blog](https://www.taosdata.com/blog/2019/12/03/%E5%88%9B%E5%BB%BA%E6%95%B0%E6%8D%AE%E8%A1%A8%E6%97%B6%E6%8F%90%E7%A4%BAmore-dnodes-are-needed/) for this issue.
+
+## 4. How do I generate a core file when TDengine crashes?
+
+See the [technical blog](https://www.taosdata.com/blog/2019/12/06/tdengine-crash%E6%97%B6%E7%94%9F%E6%88%90core%E6%96%87%E4%BB%B6%E7%9A%84%E6%96%B9%E6%B3%95/) for this issue.
+
+## 5. What should I do if I encounter an error "Unable to establish connection"?
+
+When the client encounters a connection failure, please check the following:
+
+1. Check your network environment:
+   - Cloud server: check whether the security group of the cloud server opens access to TCP/UDP ports 6030-6042
+   - Local virtual machine: check whether the network can be pinged, and try to avoid using localhost as the hostname
+   - Corporate server: if you are in a NAT network environment, be sure to check whether the server can return messages to the client
+2. Make sure that the client and server version numbers are exactly the same; the open-source Community Edition and the Enterprise Edition cannot be mixed.
+3. On the server, execute `systemctl status taosd` to check the running status of *taosd*. If it is not running, start *taosd*.
+4. Verify that the correct server FQDN (Fully Qualified Domain Name, obtained by executing the Linux command `hostname -f` on the server) is specified when the client connects. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
+5. Ping the server FQDN. If there is no response, check your network, DNS settings, or the system hosts file of the computer where the client is located.
+6. Check the firewall settings (Ubuntu: `ufw status`; CentOS: `firewall-cmd --list-port`) to confirm that TCP/UDP ports 6030-6042 are open.
+7. For JDBC (ODBC, Python, Go, and other interfaces are similar) connections on Linux, make sure that libtaos.so is in the directory /usr/local/taos/driver and that /usr/local/taos/driver is in the system library search path LD_LIBRARY_PATH.
+8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure that C:\TDengine\driver\taos.dll is in your system library search directory (it is recommended to place taos.dll in the directory C:\Windows\System32).
+9. If the connection issue still exists:
+   - On Linux, use the command-line tool nc to determine whether the TCP and UDP connections on the specified ports are unobstructed. Check whether a UDP port connection works: `nc -vuz {hostIP} {port}`. Check whether the server-side TCP port is listening: `nc -l {port}`. Check whether the client-side TCP port connection works: `nc {hostIP} {port}`.
+   - On Windows, use the PowerShell command `Test-NetConnection -ComputerName {fqdn} -Port {port}` to check whether the server-side port is accessible.
+10. You can also use the built-in network connectivity detection of the taos program to verify whether the specified port connections between the server and client are unobstructed (including TCP and UDP): [TDengine's Built-in Network Detection Tool Use Guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
+
+
+
+## 6. What should I do if I encounter the error "Unexpected generic error in RPC" or "TDengine error: Unable to resolve FQDN"?
+
+This error occurs because the client or data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS shell or client applications, check the following:
+
+1. Please verify whether the FQDN of the connected server is correct. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
+2. If the network is configured with a DNS server, check that it is working properly.
+3. If the network does not have a DNS server configured, check the hosts file of the machine where the client is located to see if the FQDN is configured and has the correct IP address.
+4. If the network configuration is OK, you need to be able to ping the connected FQDN from the machine where the client is located; otherwise the client cannot connect to the server.
+
+## 7. Why do I still get an "Invalid SQL" error after the syntax is corrected?
+
+If you have confirmed that the syntax is correct, for versions older than 2.0, please check whether the SQL statement length exceeds 64K. If it does, this error is also returned.
+
+## 8. Are “validation queries” supported?
+
+TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the database "log" monitored by the system.
+
+## 9. Can I delete or update a record?
+
+TDengine does not support the deletion function at present, and may support it in the future according to user requirements.
+
+Starting from version 2.0.8.0, TDengine supports updating written data. Using the update function requires the UPDATE 1 parameter when creating the database; afterwards, the INSERT INTO command can be used to update written data with the same timestamp. The UPDATE parameter cannot be modified with the ALTER DATABASE command. In a database created without UPDATE 1, writing data with an existing timestamp will not modify the previous data, and no error is reported.
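+
+For example, a minimal sketch using the generic db_name placeholder; a later INSERT with an existing timestamp then overwrites the earlier row:
+
+```mysql
+CREATE DATABASE db_name UPDATE 1;
+```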
+
+Note also that when UPDATE is set to 0, data sent later with a duplicate timestamp is discarded directly, but no error is reported, and it is still counted in the affected rows (so the return value of an INSERT statement cannot be used to check for duplicate timestamps). The main reason for this design is that TDengine treats written data as a stream: regardless of whether timestamps conflict, TDengine assumes that the device generating the data really did produce it. The UPDATE parameter only controls how such stream data is handled at persistence time: when UPDATE is 0, the data written first takes precedence over data written later; when UPDATE is 1, the data written later overwrites the data written first. Which behavior to choose depends on whether the earlier or the later data is expected in subsequent use and statistics.
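+
+As a minimal sketch (the database and table names are illustrative):
+
+```mysql
+CREATE DATABASE demo UPDATE 1;
+USE demo;
+CREATE TABLE tb (ts TIMESTAMP, val INT);
+INSERT INTO tb VALUES ('2021-07-01 08:00:00.000', 1);
+INSERT INTO tb VALUES ('2021-07-01 08:00:00.000', 2);
+```
+
+With UPDATE 1, the second insert replaces the value stored at that timestamp, so `SELECT * FROM tb` returns 2; with the default UPDATE 0, the second insert would be silently discarded while still being counted in the affected rows.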
+
+## 10. How to create a table with more than 1024 columns?
+
+In version 2.0 and above, 1024 columns are supported by default; older versions of TDengine allowed a maximum of 250 columns per table. In either case, if the limit is exceeded, it is recommended to logically split the wide table into several smaller ones according to the characteristics of the data.
+
+## 11. What is the most effective way to write data?
+
+Insert in batches. Each write statement can insert multiple records into one or multiple tables at the same time.
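+
+As a minimal sketch (assuming tables d1001 and d1002 each have the schema (ts TIMESTAMP, current FLOAT)):
+
+```mysql
+INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2) ('2021-07-13 14:06:33.272', 10.3)
+            d1002 VALUES ('2021-07-13 14:06:34.272', 12.6);
+```
+
+A single statement like this writes two records into d1001 and one into d1002, which is far more efficient than sending three separate INSERT statements.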
+
+## 12. How to solve the problem that Chinese characters in nchar data inserted under Windows are displayed as garbled characters?
+
+If nchar data contains Chinese characters on Windows, first confirm that the region of the system is set to China (this can be set in the Control Panel); the taos client in cmd should then handle them normally. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, confirm that the file encoding in the IDE is GBK (the default encoding type for Java), and then initialize the client configuration when creating the Connection. The specific statements are as follows:
+
+```JAVA
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+Properties properties = new Properties();
+properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "UTF-8");
+// url is the JDBC URL of the server, e.g. "jdbc:TAOS://{host}:6030/{db}?user=root&password=taosdata"
+Connection connection = DriverManager.getConnection(url, properties);
+```
+
+## 13. JDBC error: the executed SQL is not a DML or a DDL?
+
+Please update to the latest JDBC driver.
+
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.27</version>
+</dependency>
+```
+
+## 14. taos connect failed, reason: invalid timestamp.
+
+A common reason is that the server and client clocks are not synchronized; this can be fixed by synchronizing with a time server (use the ntpdate command on Linux, or select automatic synchronization in the Windows time settings).
+
+## 15. Incomplete display of table name
+
+Because the display width of the taos shell in the terminal is limited, a relatively long table name may not be displayed completely. If operations are then carried out using the truncated table name, a "Table does not exist" error occurs. This can be worked around by modifying the maxBinaryDisplayWidth option in the taos.cfg file, or by directly entering the command `set max_binary_display_width 100`. Alternatively, append the `\G` parameter at the end of the command to change how the results are displayed.
+
+## 16. How to migrate data?
+
+TDengine uniquely identifies a machine according to hostname. When moving data files from machine A to machine B, pay attention to the following three points:
+
+- For versions 2.0.0.0 to 2.0.6.x, reconfigure machine B's hostname to the one machine A used.
+- For version 2.0.7.0 and later, go to /var/lib/taos/dnode, correct the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
+- The storage structures of versions 1.x and 2.x are incompatible; you need to use a migration tool or your own application to export and import the data.
+
+## 17. How to temporarily adjust the log level in the command line program taos?
+
+For the convenience of debugging, since version 2.0.16 the command line program taos provides two new commands related to logging:
+
+```mysql
+ALTER LOCAL flag_name flag_value;
+```
+
+This command modifies the log level of a specific module in the current command line program (it is valid only for the current session; if taos is restarted, it needs to be set again):
+
+- The value of flag_name can be: debugFlag, cDebugFlag, tmrDebugFlag, uDebugFlag, rpcDebugFlag
+- The value of flag_value can be: 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs)
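+
+For example, to enable debug-level logging for the RPC module in the current session:
+
+```mysql
+ALTER LOCAL rpcDebugFlag 135;
+```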
+
+```mysql
+ALTER LOCAL RESETLOG;
+```
+
+This command deletes all log files generated by the client on the local machine.
+
diff --git a/documentation20/en/images/architecture/dnode.png b/documentation20/en/images/architecture/dnode.png
new file mode 100644
index 0000000000000000000000000000000000000000..cea87dcccba5d2761996e5dde998022d86487eb9
Binary files /dev/null and b/documentation20/en/images/architecture/dnode.png differ
diff --git a/documentation20/webdocs/assets/Picture2.png b/documentation20/en/images/architecture/message.png
similarity index 100%
rename from documentation20/webdocs/assets/Picture2.png
rename to documentation20/en/images/architecture/message.png
diff --git a/documentation20/en/images/architecture/modules.png b/documentation20/en/images/architecture/modules.png
new file mode 100644
index 0000000000000000000000000000000000000000..10ae4703a6cbbf66afea325ce4c0f919f7769a07
Binary files /dev/null and b/documentation20/en/images/architecture/modules.png differ
diff --git a/documentation20/webdocs/assets/stable.png b/documentation20/en/images/architecture/multi_tables.png
similarity index 100%
rename from documentation20/webdocs/assets/stable.png
rename to documentation20/en/images/architecture/multi_tables.png
diff --git a/documentation20/en/images/architecture/replica-forward.png b/documentation20/en/images/architecture/replica-forward.png
new file mode 100644
index 0000000000000000000000000000000000000000..bf616e030b130603eceb5dccfd30b4a1dfa68ea5
Binary files /dev/null and b/documentation20/en/images/architecture/replica-forward.png differ
diff --git a/documentation20/en/images/architecture/replica-master.png b/documentation20/en/images/architecture/replica-master.png
new file mode 100644
index 0000000000000000000000000000000000000000..cb33f1ce98661563693215d8fc73b003235c7668
Binary files /dev/null and b/documentation20/en/images/architecture/replica-master.png differ
diff --git a/documentation20/en/images/architecture/replica-restore.png b/documentation20/en/images/architecture/replica-restore.png
new file mode 100644
index 0000000000000000000000000000000000000000..1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28
Binary files /dev/null and b/documentation20/en/images/architecture/replica-restore.png differ
diff --git a/documentation20/webdocs/assets/structure.png b/documentation20/en/images/architecture/structure.png
similarity index 100%
rename from documentation20/webdocs/assets/structure.png
rename to documentation20/en/images/architecture/structure.png
diff --git a/documentation20/en/images/architecture/vnode.png b/documentation20/en/images/architecture/vnode.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6148d4907cf9a18bc52251f712d5c685651b7f5
Binary files /dev/null and b/documentation20/en/images/architecture/vnode.png differ
diff --git a/documentation20/en/images/architecture/write_master.png b/documentation20/en/images/architecture/write_master.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff2dfc20bfc2ecf956a2aab1a8965a7bbcae4387
Binary files /dev/null and b/documentation20/en/images/architecture/write_master.png differ
diff --git a/documentation20/en/images/architecture/write_slave.png b/documentation20/en/images/architecture/write_slave.png
new file mode 100644
index 0000000000000000000000000000000000000000..cacb2cb6bcc4f4d934e979862387e1345bbac078
Binary files /dev/null and b/documentation20/en/images/architecture/write_slave.png differ
diff --git a/documentation20/en/images/connections/add_datasource1.jpg b/documentation20/en/images/connections/add_datasource1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1f0f5110f312c57f3ec1788bbc02f04fac6ac142
Binary files /dev/null and b/documentation20/en/images/connections/add_datasource1.jpg differ
diff --git a/documentation20/en/images/connections/add_datasource2.jpg b/documentation20/en/images/connections/add_datasource2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fa7a83e00e96fae649910dff4edf5f5bdadd7850
Binary files /dev/null and b/documentation20/en/images/connections/add_datasource2.jpg differ
diff --git a/documentation20/en/images/connections/add_datasource3.jpg b/documentation20/en/images/connections/add_datasource3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fc850ad08ff1174de972906842e0d5ee64e6e5cb
Binary files /dev/null and b/documentation20/en/images/connections/add_datasource3.jpg differ
diff --git a/documentation20/en/images/connections/add_datasource4.jpg b/documentation20/en/images/connections/add_datasource4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3ba73e50d455111f8621f4165746078554c2d790
Binary files /dev/null and b/documentation20/en/images/connections/add_datasource4.jpg differ
diff --git a/documentation20/en/images/connections/create_dashboard1.jpg b/documentation20/en/images/connections/create_dashboard1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c
Binary files /dev/null and b/documentation20/en/images/connections/create_dashboard1.jpg differ
diff --git a/documentation20/en/images/connections/create_dashboard2.jpg b/documentation20/en/images/connections/create_dashboard2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fe5d768ac55254251e0290bf257178f5ff28f5a5
Binary files /dev/null and b/documentation20/en/images/connections/create_dashboard2.jpg differ
diff --git a/documentation20/en/images/connections/import_dashboard1.jpg b/documentation20/en/images/connections/import_dashboard1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d2ce7ed65eb0c2c729de50283b30491793493dc
Binary files /dev/null and b/documentation20/en/images/connections/import_dashboard1.jpg differ
diff --git a/documentation20/en/images/connections/import_dashboard2.jpg b/documentation20/en/images/connections/import_dashboard2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..94b09f0ee39552bb84f7ba1f65815ce2c9548b2d
Binary files /dev/null and b/documentation20/en/images/connections/import_dashboard2.jpg differ
diff --git a/documentation20/en/images/connector.png b/documentation20/en/images/connector.png
new file mode 100644
index 0000000000000000000000000000000000000000..6030bd73f51123615eabacfa5d734918559ce3d9
Binary files /dev/null and b/documentation20/en/images/connector.png differ
diff --git a/documentation20/en/images/eco_system.png b/documentation20/en/images/eco_system.png
new file mode 100644
index 0000000000000000000000000000000000000000..bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b
Binary files /dev/null and b/documentation20/en/images/eco_system.png differ
diff --git a/documentation20/en/images/tdengine-jdbc-connector.png b/documentation20/en/images/tdengine-jdbc-connector.png
new file mode 100644
index 0000000000000000000000000000000000000000..fdf1dd3fcc5ee222c4a8753efa2c95c5257314bf
Binary files /dev/null and b/documentation20/en/images/tdengine-jdbc-connector.png differ
diff --git a/documentation20/webdocs/assets/clip_image001-2474914.png b/documentation20/webdocs/assets/clip_image001-2474914.png
deleted file mode 100644
index eb369b1567c860b772e1bfdad64ff17aaac2534d..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/clip_image001-2474914.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/clip_image001-2474939.png b/documentation20/webdocs/assets/clip_image001-2474939.png
deleted file mode 100644
index 53f00deea3a484986a5681ec9d00d8ae02e88fec..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/clip_image001-2474939.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/clip_image001-2474961.png b/documentation20/webdocs/assets/clip_image001-2474961.png
deleted file mode 100644
index 20ae8d6f7724a4bddcf8c7eb3809d468aa4223ed..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/clip_image001-2474961.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/clip_image001-2474987.png b/documentation20/webdocs/assets/clip_image001-2474987.png
deleted file mode 100644
index 3d09f7fc28e7a1fb7e3bb2b9b2bc7c20895e8bb4..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/clip_image001-2474987.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/clip_image001.png b/documentation20/webdocs/assets/clip_image001.png
deleted file mode 100644
index 78b6d06a9562b802e80f0ed5fdb8963b5e525589..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/clip_image001.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/fig1.png b/documentation20/webdocs/assets/fig1.png
deleted file mode 100644
index af9b74e0d1a872b8d93f71842dc0063bc8a86092..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/fig1.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/fig2.png b/documentation20/webdocs/assets/fig2.png
deleted file mode 100644
index 3bae70ba86964c3c341b72ea1d3af04201f7c6c1..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/fig2.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/image-20190707124650780.png b/documentation20/webdocs/assets/image-20190707124650780.png
deleted file mode 100644
index 9ebcac863e862d8b240c86dec29be1ebe7aa50f0..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/image-20190707124650780.png and /dev/null differ
diff --git a/documentation20/webdocs/assets/image-20190707124818590.png b/documentation20/webdocs/assets/image-20190707124818590.png
deleted file mode 100644
index dc1cb6325b2d4cd6f05c88b75b4d17ef85caa67f..0000000000000000000000000000000000000000
Binary files a/documentation20/webdocs/assets/image-20190707124818590.png and /dev/null differ
diff --git a/documentation20/webdocs/markdowndocs/Connections with other Tools.md b/documentation20/webdocs/markdowndocs/Connections with other Tools.md
deleted file mode 100644
index 8be05698497184aee2c41a60e32f39b636e2070e..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Connections with other Tools.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Connect with other tools
-
-## Telegraf
-
-TDengine is easy to integrate with [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/), an open-source server agent for collecting and sending metrics and events, without more development.
-
-### Install Telegraf
-
-At present, TDengine supports Telegraf newer than version 1.7.4. Users can go to the [download link] and choose the proper package to install on your system.
-
-### Configure Telegraf
-
-Telegraf is configured by changing items in the configuration file */etc/telegraf/telegraf.conf*.
-
-
-In **output plugins** section,add _[[outputs.http]]_ iterm:
-
-- _url_: http://ip:6020/telegraf/udb, in which _ip_ is the IP address of any node in TDengine cluster. Port 6020 is the RESTful APT port used by TDengine. _udb_ is the name of the database to save data, which needs to create beforehand.
-- _method_: "POST"
-- _username_: username to login TDengine
-- _password_: password to login TDengine
-- _data_format_: "json"
-- _json_timestamp_units_: "1ms"
-
-In **agent** part:
-
-- hostname: used to distinguish different machines. Need to be unique.
-- metric_batch_size: 30,the maximum number of records allowed to write in Telegraf. The larger the value is, the less frequent requests are sent. For TDengine, the value should be less than 50.
-
-Please refer to the [Telegraf docs](https://docs.influxdata.com/telegraf/v1.11/) for more information.
-
-## Grafana
-
-[Grafana] is an open-source system for time-series data display. It is easy to integrate TDengine and Grafana to build a monitor system. Data saved in TDengine can be fetched and shown on the Grafana dashboard.
-
-### Install Grafana
-
-For now, TDengine only supports Grafana newer than version 5.2.4. Users can go to the [Grafana download page] for the proper package to download.
-
-### Configure Grafana
-
-TDengine Grafana plugin is in the _/usr/local/taos/connector/grafana_ directory.
-Taking Centos 7.2 as an example, just copy TDengine directory to _/var/lib/grafana/plugins_ directory and restart Grafana.
-
-### Use Grafana
-
-Users can log in the Grafana server (username/password:admin/admin) through localhost:3000 to configure TDengine as the data source. As is shown in the picture below, TDengine as a data source option is shown in the box:
-
-
-
-
-When choosing TDengine as the data source, the Host in HTTP configuration should be configured as the IP address of any node of a TDengine cluster. The port should be set as 6020. For example, when TDengine and Grafana are on the same machine, it should be configured as _http://localhost:6020.
-
-
-Besides, users also should set the username and password used to log into TDengine. Then click _Save&Test_ button to save.
-
-
-
-Then, TDengine as a data source should show in the Grafana data source list.
-
-
-
-
-Then, users can create Dashboards in Grafana using TDengine as the data source:
-
-
-
-
-
-
-Click _Add Query_ button to add a query and input the SQL command you want to run in the _INPUT SQL_ text box. The SQL command should expect a two-row, multi-column result, such as _SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval)_, in which, _from_, _to_ and _inteval_ are TDengine inner variables representing query time range and time interval.
-
-
-_ALIAS BY_ field is to set the query alias. Click _GENERATE SQL_ to send the command to TDengine:
-
-
-
-Please refer to the [Grafana official document] for more information about Grafana.
-
-
-## Matlab
-
-Matlab can connect to and retrieve data from TDengine by TDengine JDBC Driver.
-
-### MatLab and TDengine JDBC adaptation
-
-Several steps are required to adapt Matlab to TDengine. Taking adapting Matlab2017a on Windows10 as an example:
-
-1. Copy the file _JDBCDriver-1.0.0-dist.jar_ in TDengine package to the directory _${matlab_root}\MATLAB\R2017a\java\jar\toolbox_
-2. Copy the file _taos.lib_ in TDengine package to _${matlab_ root _dir}\MATLAB\R2017a\lib\win64_
-3. Add the .jar package just copied to the Matlab classpath. Append the line below as the end of the file of _${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt_
-
- `$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
-
-4. Create a file called _javalibrarypath.txt_ in directory _${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\_, and add the _taos.dll_ path in the file. For example, if the file _taos.dll_ is in the directory of _C:\Windows\System32_,then add the following line in file *javalibrarypath.txt*:
-
- `C:\Windows\System32`
-
-### TDengine operations in Matlab
-
-After correct configuration, open Matlab:
-
-- build a connection:
-
- `conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://127.0.0.1:0/’)`
-
-- Query:
-
- `sql0 = [‘select * from tb’]`
-
- `data = select(conn, sql0);`
-
-- Insert a record:
-
- `sql1 = [‘insert into tb values (now, 1)’]`
-
- `exec(conn, sql1)`
-
-Please refer to the file _examples\Matlab\TDengineDemo.m_ for more information.
-
-## R
-
-Users can use R language to access the TDengine server with the JDBC interface. At first, install JDBC package in R:
-
-```R
-install.packages('rJDBC', repos='http://cran.us.r-project.org')
-```
-
-Then use _library_ function to load the package:
-
-```R
-library('RJDBC')
-```
-
-Then load the TDengine JDBC driver:
-
-```R
-drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")
-```
-If succeed, no error message will display. Then use the following command to try a database connection:
-
-```R
-conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
-```
-
-Please replace the IP address in the command above to the correct one. If no error message is shown, then the connection is established successfully. TDengine supports below functions in _RJDBC_ package:
-
-
-- _dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)_: write the data in a data frame _iris_ to the table _test_ in the TDengine server. Parameter _overwrite_ must be _false_. _append_ must be _TRUE_ and the schema of the data frame _iris_ should be the same as the table _test_.
-- _dbGetQuery(conn, "select count(*) from test")_: run a query command
-- _dbSendUpdate(conn, "use db")_: run any non-query command.
-- _dbReadTable(conn, "test"_): read all the data in table _test_
-- _dbDisconnect(conn)_: close a connection
-- _dbRemoveTable(conn, "test")_: remove table _test_
-
-Below functions are **not supported** currently:
-- _dbExistsTable(conn, "test")_: if talbe _test_ exists
-- _dbListTables(conn)_: list all tables in the connection
-
-
-[Telegraf]: www.taosdata.com
-[download link]: https://portal.influxdata.com/downloads
-[Telegraf document]: www.taosdata.com
-[Grafana]: https://grafana.com
-[Grafana download page]: https://grafana.com/grafana/download
-[Grafana official document]: https://grafana.com/docs/
-
diff --git a/documentation20/webdocs/markdowndocs/Connector.md b/documentation20/webdocs/markdowndocs/Connector.md
deleted file mode 100644
index e5ba6d518542fa60f71708482a9e9b65c12d09ad..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Connector.md
+++ /dev/null
@@ -1,896 +0,0 @@
-# TDengine connectors
-
-TDengine provides many connectors for development, including C/C++, JAVA, Python, RESTful, Go, Node.JS, etc.
-
-NOTE: All APIs which require a SQL string as parameter, including but not limit to `taos_query`, `taos_query_a`, `taos_subscribe` in the C/C++ Connector and their counterparts in other connectors, can ONLY process one SQL statement at a time. If more than one SQL statements are provided, their behaviors are undefined.
-
-## C/C++ API
-
-C/C++ APIs are similar to the MySQL APIs. Applications should include TDengine head file _taos.h_ to use C/C++ APIs by adding the following line in code:
-```C
-#include
-```
-Make sure TDengine library _libtaos.so_ is installed and use _-ltaos_ option to link the library when compiling. In most cases, if the return value of an API is integer, it return _0_ for success and other values as an error code for failure; if the return value is pointer, then _NULL_ is used for failure.
-
-
-### Fundamental API
-
-Fundamentatal APIs prepare runtime environment for other APIs, for example, create a database connection.
-
-- `void taos_init()`
-
- Initialize the runtime environment for TDengine client. The API is not necessary since it is called int _taos_connect_ by default.
-
-
-- `void taos_cleanup()`
-
- Cleanup runtime environment, client should call this API before exit.
-
-
-- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
-
- Set client options. The parameter _option_ supports values of _TSDB_OPTION_CONFIGDIR_ (configuration directory), _TSDB_OPTION_SHELL_ACTIVITY_TIMER_, _TSDB_OPTION_LOCALE_ (client locale) and _TSDB_OPTION_TIMEZONE_ (client timezone).
-
-
-- `char* taos_get_client_info()`
-
- Retrieve version information of client.
-
-
-- `TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, int port)`
-
- Open a connection to a TDengine server. The parameters are:
-
- * ip: IP address of the server
- * user: username
- * pass: password
- * db: database to use, **NULL** for no database to use after connection. Otherwise, the database should exist before connection or a connection error is reported.
- * port: port number to connect
-
- The handle returned by this API should be kept for future use.
-
-
-- `char *taos_get_server_info(TAOS *taos)`
-
- Retrieve version information of server.
-
-
-- `int taos_select_db(TAOS *taos, const char *db)`
-
- Set default database to `db`.
-
-
-- `void taos_close(TAOS *taos)`
-
- Close a connection to a TDengine server by the handle returned by _taos_connect_`
-
-
-### C/C++ sync API
-
-Sync APIs are those APIs waiting for responses from the server after sending a request. TDengine has the following sync APIs:
-
-- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
-
- The API used to run a SQL command. The command can be DQL, DML or DDL. The parameter _taos_ is the handle returned by _taos_connect_. Return value _NULL_ means failure.
-
-
-- `int taos_result_precision(TAOS_RES *res)`
-
- Get the timestamp precision of the result set, return value _0_ means milli-second, _1_ mean micro-second and _2_ means nano-second.
-
-
-- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
-
- Fetch a row of return results through _res_.
-
-
-- `int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)`
-
- Fetch multiple rows from the result set, return value is row count.
-
-
-- `int taos_num_fields(TAOS_RES *res)` and `int taos_field_count(TAOS_RES* res)`
-
- These two APIs are identical, both return the number of fields in the return result.
-
-
-- `int* taos_fetch_lengths(TAOS_RES *res)`
-
- Get the field lengths of the result set, return value is an array whose length is the field count.
-
-
-- `int taos_affected_rows(TAOS_RES *res)`
-
- Get affected row count of the executed statement.
-
-
-- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
-
- Fetch the description of each field. The description includes the property of data type, field name, and bytes. The API should be used with _taos_num_fields_ to fetch a row of data. The structure of `TAOS_FIELD` is:
-
- ```c
- typedef struct taosField {
- char name[65]; // field name
- uint8_t type; // data type
- int16_t bytes; // length of the field in bytes
- } TAOS_FIELD;
- ```
-
-
-- `void taos_stop_query(TAOS_RES *res)`
-
- Stop the execution of a query.
-
-
-- `void taos_free_result(TAOS_RES *res)`
-
- Free the resources used by a result set. Make sure to call this API after fetching results or memory leak would happen.
-
-
-- `char *taos_errstr(TAOS_RES *res)`
-
- Return the reason of the last API call failure. The return value is a string.
-
-
-- `int *taos_errno(TAOS_RES *res)`
-
- Return the error code of the last API call failure. The return value is an integer.
-
-
-**Note**: The connection to a TDengine server is not multi-thread safe. So a connection can only be used by one thread.
-
-
-### C/C++ async API
-
-In addition to sync APIs, TDengine also provides async APIs, which are more efficient. Async APIs are returned right away without waiting for a response from the server, allowing the application to continute with other tasks without blocking. So async APIs are more efficient, especially useful when in a poor network.
-
-All async APIs require callback functions. The callback functions have the format:
-```C
-void fp(void *param, TAOS_RES * res, TYPE param3)
-```
-The first two parameters of the callback function are the same for all async APIs. The third parameter is different for different APIs. Generally, the first parameter is the handle provided to the API for action. The second parameter is a result handle.
-
-- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);`
-
- The async version of _taos_query_.
-
- * taos: the handle returned by _taos_connect_.
- * sql: the SQL command to run.
- * fp: user defined callback function. The third parameter of the callback function _code_ is _0_ (for success) or a negative number (for failure, call taos_errstr to get the error as a string). Applications mainly handle the second parameter, the returned result set.
- * param: user provided parameter which is required by the callback function.
-
-
-- `void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);`
-
- The async API to fetch a batch of rows, which should only be used with a _taos_query_a_ call.
-
- * res: result handle returned by _taos_query_a_.
- * fp: the callback function. _param_ is a user-defined structure to pass to _fp_. The parameter _numOfRows_ is the number of result rows in the current fetch cycle. In the callback function, applications should call _taos_fetch_row_ to get records from the result handle. After getting a batch of results, applications should continue to call _taos_fetch_rows_a_ API to handle the next batch, until the _numOfRows_ is _0_ (for no more data to fetch) or _-1_ (for failure).
-
-
-- `void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);`
-
- The async API to fetch a result row.
-
- * res: result handle.
- * fp: the callback function. _param_ is a user-defined structure to pass to _fp_. The third parameter of the callback function is a single result row, which is different from that of _taos_fetch_rows_a_ API. With this API, it is not necessary to call _taos_fetch_row_ to retrieve each result row, which is handier than _taos_fetch_rows_a_ but less efficient.
-
-
-Applications may apply operations on multiple tables. However, **it is important to make sure the operations on the same table are serialized**. That means after sending an insert request in a table to the server, no operations on the table are allowed before a response is received.
-
-
-### C/C++ parameter binding API
-
-TDengine also provides parameter binding APIs, like MySQL, only question mark `?` can be used to represent a parameter in these APIs.
-
-- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
-
- Create a TAOS_STMT to represent the prepared statement for other APIs.
-
-- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
-
- Parse SQL statement _sql_ and bind result to _stmt_ , if _length_ larger than 0, its value is used to determine the length of _sql_, the API auto detects the actual length of _sql_ otherwise.
-
-- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
-
- Bind values to parameters. _bind_ points to an array, the element count and sequence of the array must be identical as the parameters of the SQL statement. The usage of _TAOS_BIND_ is same as _MYSQL_BIND_ in MySQL, its definition is as below:
-
- ```c
- typedef struct TAOS_BIND {
- int buffer_type;
- void * buffer;
- unsigned long buffer_length; // not used in TDengine
- unsigned long *length;
- int * is_null;
- int is_unsigned; // not used in TDengine
- int * error; // not used in TDengine
- } TAOS_BIND;
- ```
-
-- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
-
- Add bound parameters to batch, client can call `taos_stmt_bind_param` again after calling this API. Note this API only support _insert_ / _import_ statements, it returns an error in other cases.
-
-- `int taos_stmt_execute(TAOS_STMT *stmt)`
-
- Execute the prepared statement. This API can only be called once for a statement at present.
-
-- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
-
- Acquire the result set of an executed statement. The usage of the result is same as `taos_use_result`, `taos_free_result` must be called after one you are done with the result set to release resources.
-
-- `int taos_stmt_close(TAOS_STMT *stmt)`
-
- Close the statement, release all resources.
-
-
-### C/C++ continuous query interface
-
-TDengine provides APIs for continuous query driven by time, which run queries periodically in the background. There are only two APIs:
-
-
-- `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES * res, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *));`
-
- The API is used to create a continuous query.
- * _taos_: the connection handle returned by _taos_connect_.
- * _sqlstr_: the SQL string to run. Only query commands are allowed.
- * _fp_: the callback function to run after a query. TDengine passes query result `row`, query state `res` and user provided parameter `param` to this function. In this callback, `taos_num_fields` and `taos_fetch_fields` could be used to fetch field information.
- * _param_: a parameter passed to _fp_
- * _stime_: the time of the stream starts in the form of epoch milliseconds. If _0_ is given, the start time is set as the current time.
- * _callback_: a callback function to run when the continuous query stops automatically.
-
- The API is expected to return a handle for success. Otherwise, a NULL pointer is returned.
-
-
-- `void taos_close_stream (TAOS_STREAM *tstr)`
-
- Close the continuous query by the handle returned by _taos_open_stream_. Make sure to call this API when the continuous query is not needed anymore.
-
-
-### C/C++ subscription API
-
-For the time being, TDengine supports subscription on one or multiple tables. It is implemented through periodic pulling from a TDengine server.
-
-* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
-
- The API is used to start a subscription session, it returns the subscription object on success and `NULL` in case of failure, the parameters are:
- * **taos**: The database connnection, which must be established already.
- * **restart**: `Zero` to continue a subscription if it already exits, other value to start from the beginning.
- * **topic**: The unique identifier of a subscription.
- * **sql**: A sql statement for data query, it can only be a `select` statement, can only query for raw data, and can only query data in ascending order of the timestamp field.
- * **fp**: A callback function to receive query result, only used in asynchronization mode and should be `NULL` in synchronization mode, please refer below for its prototype.
- * **param**: User provided additional parameter for the callback function.
- * **interval**: Pulling interval in millisecond. Under asynchronization mode, API will call the callback function `fp` in this interval, system performance will be impacted if this interval is too short. Under synchronization mode, if the duration between two call to `taos_consume` is less than this interval, the second call blocks until the duration exceed this interval.
-
-* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
-
- Prototype of the callback function, the parameters are:
- * tsub: The subscription object.
- * res: The query result.
- * param: User provided additional parameter when calling `taos_subscribe`.
- * code: Error code in case of failures.
-
-* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
-
- The API used to get the new data from a TDengine server. It should be put in an loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronization mode. If the duration between two call to `taos_consume` is less than pulling interval, the second call blocks until the duration exceed the interval. The API returns the new rows if new data arrives, or empty rowset otherwise, and if there's an error, it returns `NULL`.
-
-* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
-
- Stop a subscription session by the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in later call to `taos_subscribe`, the information is removed otherwise.
-
-
-## Java Connector
-
-To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1].
-
-Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver:
-
-* libtaos.so (Linux)
- After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path.
-
-* taos.dll (Windows)
- After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
-
-> Note: Please make sure that [TDengine Windows client][14] has been installed if developing on Windows. Now although TDengine client would be defaultly installed together with TDengine server, it can also be installed [alone][15].
-
-Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver:
-* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method.
-* No support for transaction
-* No support for union between tables
-* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`.
-
-## Version list of TAOS-JDBCDriver and required TDengine and JDK
-
-| taos-jdbcdriver | TDengine | JDK |
-| --- | --- | --- |
-| 2.0.2 | 2.0.0.x or higher | 1.8.x |
-| 1.0.3 | 1.6.1.x or higher | 1.8.x |
-| 1.0.2 | 1.6.1.x or higher | 1.8.x |
-| 1.0.1 | 1.6.1.x or higher | 1.8.x |
-
-## DataType in TDengine and Java
-
-The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java:
-
-| TDengine | Java |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
-
-## How to get TAOS-JDBC Driver
-
-### maven repository
-
-taos-jdbcdriver has been published to [Sonatype Repository][1]:
-* [sonatype][8]
-* [mvnrepository][9]
-* [maven.aliyun][10]
-
-Using the following pom.xml for maven projects
-
-```xml
-
-
- com.taosdata.jdbc
- taos-jdbcdriver
- 2.0.2
-
-
-```
-
-### JAR file from the source code
-
-After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated.
-
-## Usage
-
-### get the connection
-
-```java
-Class.forName("com.taosdata.jdbc.TSDBDriver");
-String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
-Connection conn = DriverManager.getConnection(jdbcUrl);
-```
-> `6030` is the default port and `log` is the default database for system monitor.
-
-A normal JDBC URL looks as follows:
-`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
-
-values in `{}` are necessary while values in `[]` are optional。Each option in the above URL denotes:
-
-* user:user name for login, defaultly root。
-* password:password for login,defaultly taosdata。
-* charset:charset for client,defaultly system charset
-* cfgdir:log directory for client, defaultly _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows。
-* locale:language for client,defaultly system locale。
-* timezone:timezone for client,defaultly system timezone。
-
-The options above can be configures (`ordered by priority`):
-1. JDBC URL
-
- As explained above.
-2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
-```java
-public Connection getConn() throws Exception{
- Class.forName("com.taosdata.jdbc.TSDBDriver");
- String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
- Properties connProps = new Properties();
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
- return conn;
-}
-```
-
-3. Configuration file (taos.cfg)
-
- Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows
-```properties
-# client default username
-# defaultUser root
-
-# client default password
-# defaultPass taosdata
-
-# default system charset
-# charset UTF-8
-
-# system locale
-# locale en_US.UTF-8
-```
-> More options can refer to [client configuration][13]
-
-### Create databases and tables
-
-```java
-Statement stmt = conn.createStatement();
-
-// create database
-stmt.executeUpdate("create database if not exists db");
-
-// use database
-stmt.executeUpdate("use db");
-
-// create table
-stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
-```
-> Note: if no step like `use db`, the name of database must be added as prefix like _db.tb_ when operating on tables
-
-### Insert data
-
-```java
-// insert data
-int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
-
-System.out.println("insert " + affectedRows + " rows.");
-```
-> _now_ is the server time.
-> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year).
-
-### Query database
-
-```java
-// query data
-ResultSet resultSet = stmt.executeQuery("select * from tb");
-
-Timestamp ts = null;
-int temperature = 0;
-float humidity = 0;
-while(resultSet.next()){
-
- ts = resultSet.getTimestamp(1);
- temperature = resultSet.getInt(2);
- humidity = resultSet.getFloat("humidity");
-
- System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
-}
-```
-> query is consistent with relational database. The subscript start with 1 when retrieving return results. It is recommended to use the column name to retrieve results.
-
-### Close all
-
-```java
-resultSet.close();
-stmt.close();
-conn.close();
-```
-> `please make sure the connection is closed to avoid the error like connection leakage`
-
-## Using connection pool
-
-**HikariCP**
-
-* dependence in pom.xml:
-```xml
-
- com.zaxxer
- HikariCP
- 3.4.1
-
-```
-
-* Examples:
-```java
- public static void main(String[] args) throws SQLException {
- HikariConfig config = new HikariConfig();
- config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
- config.setUsername("root");
- config.setPassword("taosdata");
-
- config.setMinimumIdle(3); //minimum number of idle connection
- config.setMaximumPoolSize(10); //maximum number of connection in the pool
- config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
- config.setIdleTimeout(60000); // max idle time for recycle idle connection
- config.setConnectionTestQuery("describe log.dn"); //validation query
- config.setValidationTimeout(3000); //validation query timeout
-
- HikariDataSource ds = new HikariDataSource(config); //create datasource
-
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to conneciton pool
-}
-```
-> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool.
-> More instructions can refer to [User Guide][5]
-
-**Druid**
-
-* dependency in pom.xml:
-
-```xml
-
- com.alibaba
- druid
- 1.1.20
-
-```
-
-* Examples:
-```java
-public static void main(String[] args) throws Exception {
- Properties properties = new Properties();
- properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
- properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
- properties.put("username","root");
- properties.put("password","taosdata");
-
- properties.put("maxActive","10"); //maximum number of connection in the pool
- properties.put("initialSize","3");//initial number of connection
- properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
- properties.put("minIdle","3");//minimum number of connection in the pool
-
- properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
-
- properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
- properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
-
- properties.put("validationQuery","describe log.dn"); //validation query
- properties.put("testWhileIdle","true"); // test connection while idle
- properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
- properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
-
- //create druid datasource
- DataSource ds = DruidDataSourceFactory.createDataSource(properties);
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
-
- //query or insert
- // ...
-
- connection.close(); // put back to conneciton pool
-}
-```
-> More instructions can refer to [User Guide][6]
-
-**Notice**
-* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`.
-
-As follows,`1` will be returned if `select server_status()` is successfully executed。
-```shell
-taos> select server_status();
-server_status()|
-================
-1 |
-Query OK, 1 row(s) in set (0.000141s)
-```
-
-## Python Connector
-
-### Install TDengine Python client
-
-Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
-
-```cmd
-pip install src/connector/python/python2/
-```
-
-or
-
-```
-pip install src/connector/python/python3/
-```
-
-If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use.
-
-### Python client interfaces
-
-To use TDengine Python client, import TDengine module at first:
-
-```python
-import taos
-```
-
-Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:
-
-- _TDengineConnection_ class
-
- Run `help(taos.TDengineConnection)` in python terminal for details.
-
-- _TDengineCursor_ class
-
- Run `help(taos.TDengineCursor)` in python terminal for details.
-
-- connect method
-
- Open a connection. Run `help(taos.connect)` in python terminal for details.
-
-## RESTful Connector
-
-TDengine also provides RESTful API to satisfy developing on different platforms. Unlike other databases, TDengine RESTful API applies operations to the database through the SQL command in the body of HTTP POST request. What users are required to provide is just a URL.
-
-
-For the time being, TDengine RESTful API uses a _\_ generated from username and password for identification. Safer identification methods will be provided in the future.
-
-
-### HTTP URL encoding
-
-To use TDengine RESTful API, the URL should have the following encoding format:
-```
-http://:/rest/sql
-```
-- _ip_: IP address of any node in a TDengine cluster
-- _PORT_: TDengine HTTP service port. It is 6020 by default.
-
-For example, the URL encoding _http://192.168.0.1:6020/rest/sql_ used to send HTTP request to a TDengine server with IP address as 192.168.0.1.
-
-It is required to add a token in an HTTP request header for identification.
-
-```
-Authorization: Basic
-```
-
-The HTTP request body contains the SQL command to run. If the SQL command contains a table name, it should also provide the database name it belongs to in the form of `.`. Otherwise, an error code is returned.
-
-For example, use _curl_ command to send a HTTP request:
-
-```
-curl -H 'Authorization: Basic ' -d '' :/rest/sql
-```
-
-or use
-
-```
-curl -u username:password -d '' :/rest/sql
-```
-
-where `TOKEN` is the encryted string of `{username}:{password}` using the Base64 algorithm, e.g. `root:taosdata` will be encoded as `cm9vdDp0YW9zZGF0YQ==`
-
-### HTTP response
-
-The HTTP resonse is in JSON format as below:
-
-```
-{
- "status": "succ",
- "head": ["column1","column2", …],
- "data": [
- ["2017-12-12 23:44:25.730", 1],
- ["2017-12-12 22:44:25.728", 4]
- ],
- "rows": 2
-}
-```
-Specifically,
-- _status_: the result of the operation, success or failure
-- _head_: description of returned result columns
-- _data_: the returned data array. If no data is returned, only an _affected_rows_ field is listed
-- _rows_: the number of rows returned
-
-### Example
-
-- Use _curl_ command to query all the data in table _t1_ of database _demo_:
-
- `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql`
-
-The return value is like:
-
-```
-{
- "status": "succ",
- "head": ["column1","column2","column3"],
- "data": [
- ["2017-12-12 23:44:25.730", 1, 2.3],
- ["2017-12-12 22:44:25.728", 4, 5.6]
- ],
- "rows": 2
-}
-```
-
-- Use HTTP to create a database:
-
- `curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql`
-
- The return value should be:
-
-```
-{
- "status": "succ",
- "head": ["affected_rows"],
- "data": [[1]],
- "rows": 1,
-}
-```
-
-## Go Connector
-
-TDengine provides a GO client package `taosSql`. `taosSql` implements a kind of interface of GO `database/sql/driver`. User can access TDengine by importing the package in their program with the following instructions, detailed usage please refer to `https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`
-
-```Go
-import (
- "database/sql"
- _ github.com/taosdata/driver-go/taoSql“
-)
-```
-### API
-
-* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
-
- Open DB, generally DRIVER_NAME will be used as a constant with default value `taosSql`, dataSourceName is a combined String with format `user:password@/tcp(host:port)/dbname`. If user wants to access TDengine with multiple goroutine concurrently, the better way is to create an sql.Open object in each goroutine to access TDengine.
-
- **Note**: When calling this api, only a few initial work are done, instead the validity check happened during executing `Query` or `Exec`, at this time the connection will be created, and system will check if `user、password、host、port` is valid. Additionaly the most of features are implemented in the taosSql dependency lib `libtaos`, from this view, sql.Open is lightweight.
-
-* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
-
- Execute non-Query related SQLs, the execution result is stored with type of Result.
-
-
-* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)`
-
- Execute Query related SQLs, the execution result is *Raw, the detailed usage can refer GO interface `database/sql/driver`
-
-## Node.js Connector
-
-TDengine also provides a node.js connector package that is installable through [npm](https://www.npmjs.com/). The package is also in our source code at *src/connector/nodejs/*. The following instructions are also available [here](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs)
-
-To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/).
-
-```cmd
-npm install td-connector
-```
-
-It is highly suggested you use npm. If you don't have it installed, you can also just copy the nodejs folder from *src/connector/nodejs/* into your node project folder.
-
-To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)
-
-### On Unix
-
-- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
-- `make`
-- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
-
-### On macOS
-
-- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
-
-- Xcode
-
- - You also need to install the
-
- ```
- Command Line Tools
- ```
-
- via Xcode. You can find this under the menu
-
- ```
- Xcode -> Preferences -> Locations
- ```
-
- (or by running
-
- ```
- xcode-select --install
- ```
-
- in your Terminal)
-
- - This step will install `gcc` and the related toolchain containing `make`
-
-### On Windows
-
-#### Option 1
-
-Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
-
-#### Option 2
-
-Install tools and configuration manually:
-
-- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
-- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
-- Launch cmd, `npm config set msvs_version 2017`
-
-If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
-
-To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
-
-### Usage
-
-The following is a short summary of the basic usage of the connector, the full api and documentation can be found [here](http://docs.taosdata.com/node)
-
-#### Connection
-
-To use the connector, first require the library ```td-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```, other options if not set, will be the default values as shown below.
-
-A cursor also needs to be initialized in order to interact with TDengine from Node.js.
-
-```javascript
-const taos = require('td-connector');
-var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
-var cursor = conn.cursor(); // Initializing a new cursor
-```
-
-To close a connection, run
-
-```javascript
-conn.close();
-```
-
-#### Queries
-
-We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object.
-
-```javascript
-var query = cursor.query('show databases;')
-```
-
-We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results.
-
-```javascript
-var promise = query.execute();
-promise.then(function(result) {
- result.pretty(); //logs the results to the console as if you were in the taos shell
-});
-```
-
-You can also query by binding parameters to a query by filling in the question marks in a string as so. The query will automatically parse what was binded and convert it to the proper format for use with TDengine
-
-```javascript
-var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
-query.execute().then(function(result) {
- result.pretty();
-})
-```
-
-The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
-
-```javascript
-var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
-promise.then(function(result) {
- result.pretty();
-})
-```
-#### Async functionality
-
-Async queries can be performed using the same functions, such as `cursor.execute` and `cursor.query`, with `_a` appended to their names.
-
-Say you want to execute async queries on two separate tables. Using `cursor.query_a`, you get a TaosQuery object for each; executing it with the `execute_a` function returns a promise that resolves with a TaosResult object.
-
-```javascript
-var promise1 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
-var promise2 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
-promise1.then(function(result) {
- result.pretty();
-})
-promise2.then(function(result) {
- result.pretty();
-})
-```
-
-
-### Example
-
-An example of using the NodeJS connector to create a table with weather data, and to create and execute queries, can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js). (This is the preferred way to use the connector.)
-
-An example that achieves the same things but without the object wrappers around the returned data can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js).
-
-[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[3]: https://github.com/taosdata/TDengine
-[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
-[5]: https://github.com/brettwooldridge/HikariCP
-[6]: https://github.com/alibaba/druid
-[7]: https://github.com/taosdata/TDengine/issues
-[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[10]: https://maven.aliyun.com/mvn/search
-[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
-[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
-[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
-[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
-[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/Contributor_License_Agreement.md b/documentation20/webdocs/markdowndocs/Contributor_License_Agreement.md
deleted file mode 100644
index 8c158da4c5958384064b9993de6643be86b94fee..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Contributor_License_Agreement.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# TaosData Contributor License Agreement
-
-This TaosData Contributor License Agreement (CLA) applies to any contribution you make to any TaosData project. If you are signing this agreement on behalf of your employing organization, you warrant that you have the authority to grant the agreement.
-
-## Terms
-
-**"TaosData"**, **"we"**, **"our"** and **"us"** means TaosData, inc.
-
-**"You"** and **"your"** means you or the organization on whose behalf you sign this agreement.
-
-**"Contribution"** means any original work that you, or the organization you represent, submit to TaosData for any project in any manner.
-
-## Copyright License
-
-All rights of your Contribution submitted to TaosData in any manner are granted to TaosData and recipients of software distributed by TaosData. You waive any rights that may affect our ownership of the copyright and grant to us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable, and sublicensable license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Contributions and any derivative work created based on a Contribution.
-
-## Patent License
-
-With respect to any patents you own or that you can license without payment to any third party, you grant to us and to any recipient of software distributed by us, a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, sell, offer to sell, import, and otherwise transfer the Contribution in whole or in part, alone or included in any product under any patent you own, or license from a third party, that is necessarily infringed by the Contribution or by combination of the Contribution with any Work.
-
-## Your Representations and Warranties
-
-You represent and warrant that:
-
-- the Contribution you submit is an original work and you can legally grant the rights set out in this agreement.
-
-- the Contribution you submit and the licenses you grant do not, and will not, infringe the rights of any third party.
-
-- you are not aware of any pending or threatened claims, suits, actions, or charges pertaining to the Contributions. You also agree to notify TaosData immediately if you become aware of any such actual or potential claims, suits, actions, allegations or charges.
-
-## Support
-
-You are not obligated to support your Contribution unless you volunteer to provide support. You may also provide support for a fee.
-
-**I agree and accept on behalf of myself and on behalf of my organization:**
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/Documentation.md b/documentation20/webdocs/markdowndocs/Documentation.md
deleted file mode 100644
index bdafd40f7c76425a4f9734a2561b2b9a945c757f..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Documentation.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# Documentation
-
-TDengine is a highly efficient platform to store, query, and analyze time-series data. It works like a relational database, but we strongly suggest you read through the following documentation before trying it out.
-
-## Getting Started
-
-- Quick Start: download, install and experience TDengine in a few seconds
-- TDengine Shell: command-line interface to access TDengine server
-- Major Features: insert/query, aggregation, cache, pub/sub, continuous query
-
-## Data Model and Architecture
-
-- Data Model: relational database model, but one table for one device with static tags
-- Architecture: Management Module, Data Module, Client Module
-- Writing Process: records received are written to WAL, then cache, then an ack is sent back to the client
-- Data Storage: records are sharded in the time range, and stored column by column
-
-## TAOS SQL
-
-- Data Types: support timestamp, int, float, double, binary, nchar, bool, and other types
-- Database Management: add, drop, check databases
-- Table Management: add, drop, check, alter tables
-- Inserting Records: insert one or more records into tables, historical records can be imported
-- Data Query: query data with time range and filter conditions, support limit/offset
-- SQL Functions: support aggregation, selector, transformation functions
-- Downsampling: aggregate data in successive time windows, support interpolation
-
-## STable: Super Table
-
-- What is a Super Table: an innovative way to aggregate tables
-- Create a STable: it is like creating a standard table, but with tags defined
-- Create a Table via STable: use STable as the template, with tags specified
-- Aggregate Tables via STable: group tables together by specifying the tags filter condition
-- Create Table Automatically: create tables automatically with a STable as a template
-- Management of STables: create/delete/alter super table just like standard tables
-- Management of Tags: add/delete/alter tags on super tables or tables
-
-## Advanced Features
-
-- Continuous Query: query executed by TDengine periodically with a sliding window
-- Publisher/Subscriber: subscribe to the newly arrived data like a typical messaging system
-- Caching: the newly arrived data of each device/table will always be cached
-
-## Connector
-
-- C/C++ Connector: primary method to connect to the server through libtaos client library
-- Java Connector: driver for connecting to the server from Java applications using the JDBC API
-- Python Connector: driver for connecting to the server from Python applications
-- RESTful Connector: a simple way to interact with TDengine via HTTP
-- Go Connector: driver for connecting to the server from Go applications
-- Node.js Connector: driver for connecting to the server from node applications
-
-## Connections with Other Tools
-
-- Telegraf: pass the collected DevOps metrics to TDengine
-- Grafana: query the data saved in TDengine and visualize them
-- Matlab: access TDengine server from Matlab via JDBC
-- R: access TDengine server from R via JDBC
-
-## Administrator
-
-- Directory and Files: files and directories related with TDengine
-- Configuration on Server: customize IP port, cache size, file block size and other settings
-- Configuration on Client: customize locale, default user and others
-- User Management: add/delete users, change passwords
-- Import Data: import data into TDengine from either script or CSV file
-- Export Data: export data either from TDengine shell or from tool taosdump
-- Management of Connections, Streams, Queries: check or kill the connections, queries
-- System Monitor: collect the system metric, and log important operations
-
-## More on System Architecture
-
-- Storage Design: column-based storage with optimization on time-series data
-- Query Design: an efficient way to query time-series data
-- Technical blogs that delve into the internals of TDengine
-
-## More on IoT Big Data
-
-- [Characteristics of IoT Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
-- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
-- [Why TDengine is the Best Choice for IoT Big Data Processing?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
-
-## Tutorials & FAQ
-
-- FAQ: a list of frequently asked questions and answers
-- Use cases: a few typical cases to explain how to use TDengine in IoT platforms
-
diff --git a/documentation20/webdocs/markdowndocs/Getting Started.md b/documentation20/webdocs/markdowndocs/Getting Started.md
deleted file mode 100644
index 4d34cb49f4a84ac6c9d63e47bc8230c150b9013e..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Getting Started.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# Getting Started
-
-## Quick Start
-
-At the moment, TDengine only runs on Linux. You can set up and install it either from the source code or from the packages. It takes only a few seconds from download to a successful run.
-
-### Install from Source
-
-Please visit our [github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
-
-### Install from Package
-
-Three different packages are provided; please pick the one you prefer.
-
-For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check whether your system has the *systemd* package, use the _which systemctl_ command.
-
-```cmd
-which systemctl
-```
-
-If the `systemd` package is not found, please [install from source code](#Install-from-Source).
-
-### Running TDengine
-
-After installation, start the TDengine service with the `systemctl` command.
-
-```cmd
-systemctl start taosd
-```
-
-Then check whether the server is working:
-
-```cmd
-systemctl status taosd
-```
-
-If the service is running successfully, you can interact with TDengine through the shell `taos`, the command-line interface tool located at /usr/local/bin/taos.
-
-**Note: The _systemctl_ command needs the root privilege. Use _sudo_ if you are not the _root_ user.**
-
-## TDengine Shell
-
-To launch the TDengine shell, the command-line interface, in a Linux terminal, type:
-
-```cmd
-taos
-```
-
-The welcome message is printed if the shell connects to the TDengine server successfully; otherwise, an error message will be printed (refer to our [FAQ](../faq) page for troubleshooting the connection error). The TDengine shell prompt is:
-
-```cmd
-taos>
-```
-
-In the TDengine shell, you can create databases, create tables, and insert or query data with SQL. Each command ends with a semicolon. It works like MySQL, for example:
-
-```mysql
-create database db;
-use db;
-create table t (ts timestamp, cdata int);
-insert into t values ('2019-07-15 10:00:00', 10);
-insert into t values ('2019-07-15 10:01:05', 20);
-select * from t;
- ts | cdata |
-===================================
- 19-07-15 10:00:00.000| 10|
- 19-07-15 10:01:05.000| 20|
-Query OK, 2 row(s) in set (0.001700s)
-```
-
-Besides SQL commands, the system administrator can check the system status, add or delete user accounts, and manage the servers.
-
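-Account management from the shell looks roughly like the sketch below (the user name and password are invented for illustration; the exact syntax may vary across versions):
-
-```mysql
-CREATE USER ops_user PASS 'ops123';
-SHOW USERS;
-DROP USER ops_user;
-```
-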
-### Shell Command Line Parameters
-
-You can run the `taos` command with command-line options to fit your needs. Some frequently used options are listed below:
-
-- -c, --config-dir: set the configuration directory; it is _/etc/taos_ by default
-- -h, --host: set the IP address of the server to connect to; default is localhost
-- -s, --commands: set the command to run without entering the shell
-- -u, --user: user name to connect to the server; default is root
-- -p, --password: password; default is 'taosdata'
-- -?, --help: get a full list of supported options
-
-Examples:
-
-```cmd
-taos -h 192.168.0.1 -s "use db; show tables;"
-```
-
-### Run Batch Commands
-
-Inside the TDengine shell, you can run batch commands from a file with the *source* command.
-
-```
-taos> source <filename>;
-```
-
-### Tips
-
-- Use the up/down arrow keys to browse the command history
-- To change the default password, use the "`alter user`" command (see the sketch after this list)
-- Press Ctrl+C to interrupt a running query
-- To clean the cached schema of tables or STables, execute the command `RESET QUERY CACHE`
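-
-As a hedged sketch of the "`alter user`" command above (the new password value is invented; consult the administrator documentation for the exact grammar in your version):
-
-```mysql
--- change the password of the default root account
-ALTER USER root PASS 'MyNewPassword';
-```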
-
-## Major Features
-
-The core functionality of TDengine is a time-series database. To reduce development and management complexity, and to further improve system efficiency, TDengine also provides caching, a pub/sub messaging system, and stream computing, providing a full stack for an IoT big data platform. The detailed features are listed below:
-
-- SQL-like query language used to insert or explore data
-
-- C/C++, Java (JDBC), Python, Go, RESTful, and Node.js interfaces for development
-
-- Ad hoc queries/analysis via Python/R/Matlab or TDengine shell
-
-- Continuous queries to support sliding-window based stream computing
-
-- Super table to aggregate multiple time-streams efficiently with flexibility
-
-- Aggregation over a time window on one or multiple time-streams
-
-- Built-in messaging system to support publisher/subscriber model
-
-- Built-in cache for each time stream to make the latest data available at lightning speed
-
-- Transparent handling of historical data and real-time data
-
-- Integrating with Telegraf, Grafana and other tools seamlessly
-
-- A set of tools or configuration to manage TDengine
-
-
-For enterprise edition, TDengine provides more advanced features below:
-
-- Linear scalability to deliver higher capacity/throughput
-
-- High availability to guarantee carrier-grade service
-
-- Built-in replication between nodes which may span multiple geographical sites
-
-- Multi-tier storage to make historical data management simpler and cost-effective
-
-- Web-based management tools and other tools to make maintenance simpler
-
-TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster in insert/query speed. On a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. With column-based storage and compression algorithms tuned for different data types, less than 1/10 of the storage space is required.
-
-## Explore More on TDengine
-
-Please read through the whole documentation to learn more about TDengine.
-
diff --git a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md b/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md
deleted file mode 100644
index 44d572268de04662c190a6a5975c784b38aad117..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md
+++ /dev/null
@@ -1,248 +0,0 @@
-# TDengine Technical Design
-
-## Storage Design
-
-TDengine storage consists of **metadata storage** and **written-data storage**. The following sections describe the storage structure of each kind of data in detail.
-
-### Metadata Storage
-
-Metadata in TDengine includes information about databases, tables, super tables, and so on. By default, metadata is stored under the _/var/lib/taos/mgmt/_ directory, whose layout is shown below:
-```
-/var/lib/taos/
- +--mgmt/
- +--db.db
- +--meters.db
- +--user.db
- +--vgroups.db
-```
-Metadata records are laid out sequentially in the file. Each record represents one metadata object in TDengine (a database, a table, etc.). The metadata files are append-only; even deleting a metadata object only appends a deletion record to the file.
-
-### Written Data Storage
-
-Data written to TDengine is sharded on disk by time range. The data of all tables in the same vnode within the same time range is stored in the same file group, such as the v0f1804* files below. This sharding scheme greatly simplifies queries along the time dimension and improves query speed. By default, each file on disk holds 10 days of data; this can be adjusted with the database's _daysPerFile_ configuration option. Inside a file, data is stored in blocks. Each data block contains data of only one table, sorted in ascending order of the primary timestamp. Within a block, data is stored column by column, so values of the same type sit together, which greatly improves the compression ratio and saves storage space. TDengine compresses different data types with different algorithms, including simple8B, delta-of-delta, RLE, and LZ4, to achieve the best compression results.
-
-Data files are stored under */var/lib/taos/data/* by default, while the */var/lib/taos/tsdb/* directory holds the vnode information, the information of the tables in each vnode, and links to the data files. The full directory tree is shown below:
-```
-/var/lib/taos/
- +--tsdb/
- | +--vnode0
- | +--meterObj.v0
- | +--db/
- | +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
- | +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
- | +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
- | +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
- | +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
- | +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
- | :
- +--data/
- +--vnode0/
- +--v0f1804.head1
- +--v0f1804.data
- +--v0f1804.last1
- +--v0f1805.head1
- +--v0f1805.data
- +--v0f1805.last1
- :
-```
-
-#### meterObj file
-Each vnode has exactly one _meterObj_ file. It stores basic information about the vnode (creation time, configuration, vnode statistics, etc.) as well as information about the tables in the vnode. Its structure is:
-```
-<file start>
-[file_header]
-[table_record1_offset&length]
-[table_record2_offset&length]
-...
-[table_recordN_offset&length]
-[table_record1]
-[table_record2]
-...
-[table_recordN]
-<file end>
-```
-The file header is 512 bytes and mainly holds basic information about the vnode. Each table record is the on-disk representation of one table belonging to the vnode.
-
-#### head file
-A head file stores the index of the data blocks in its corresponding data file. It is organized as follows:
-```
-<file start>
-[file_header]
-[table1_offset]
-[table2_offset]
-...
-[tableN_offset]
-[table1_data_index]
-[table2_data_index]
-...
-[tableN_data_index]
-<file end>
-```
-The offset list at the beginning of the file gives, for each table, the offset of its data index block within the file. Each table's index information is stored contiguously in the head file, so when reading a single table TDengine can load all of its block indices into memory at once, which greatly improves read speed. A table's data index block is organized as:
-```
-[index_block_info]
-[block1_index]
-[block2_index]
-...
-[blockN_index]
-```
-The index block info records descriptive information such as the number of data blocks. Each block index corresponds to a single data block in the data file or the last file; it records the file in which the block is stored, the offset of the block's start, the range of the primary timestamps in the block, and so on. The block indices are ordered by time range: the timestamps in the data block of index M are all later than those of index M-1. This pre-sorted layout lets TDengine use binary search when querying by timestamp, which greatly speeds up queries.
-
-#### data file
-A data file stores the actual data blocks and is append-only. It is organized as follows:
-```
-<file start>
-[file_header]
-[block1]
-[block2]
-...
-[blockN]
-<file end>
-```
-Each data block belongs to exactly one table in the vnode, and the records in a block are ordered by the primary timestamp. Data within a block is arranged column by column, so values of the same type sit together, which makes compression and reads convenient. Each data block is organized as follows:
-```
-[column1_info]
-[column2_info]
-...
-[columnN_info]
-[column1_data]
-[column2_data]
-...
-[columnN_data]
-```
-The column info records the column's type, its compression algorithm, and the offset and length of the column data in the file. It also contains the pre-computed statistics of that column's data in the block, so a filtering query can decide from the pre-computed results whether the data block needs to be read at all, which greatly improves read speed.
-
-#### last file
-To prevent fragmentation of data blocks and to improve query speed and compression ratio, TDengine introduces the last file. When a data block about to be flushed to disk contains fewer records than a threshold, TDengine first writes it to the last file for temporary storage. When new data needs to be flushed later, the data in the last file is read back, merged with the new data into a new data block, and written to the data file. The last file is organized like the data file.
-
-### Storage Design Summary
-Through its innovative architecture and storage layout, TDengine uses hardware resources effectively. On the one hand, its virtualization makes horizontal scaling and replication easy. On the other hand, storing each table's data sorted by the primary timestamp and organized by column gives TDengine great advantages in writing, querying, and compression.
-
-
-## Query Processing
-
-### Overview
-
-TDengine provides a rich set of query functions for tables and super tables. Besides regular aggregate queries, it also supports time-window queries and statistical aggregation for time-series data. Query processing in TDengine is completed cooperatively by the client, the management node, and the data nodes. The query-related functions and modules in each component are as follows:
-
-Client (Client App). The client contains the TAOS SQL parser (SQL Parser), the query executor (Query Executor), the second-stage aggregator (Result Merger), and the continuous query manager (Continuous Query Manager). The SQL parser parses and validates SQL statements and converts them into an abstract syntax tree. The query executor converts the syntax tree into query execution logic and, based on the query conditions, splits it into a metadata query against the management node and data queries against the data nodes. Because TAOS SQL currently provides no complex nested queries or pipelined query processing, there is no need for query-plan optimization or logical-to-physical plan conversion. The second-stage aggregator merges the independent results returned by the data nodes into the final result. The continuous query manager manages the continuous queries created by users, launching them on schedule and writing the results back to TDengine or returning them to the application as required. The client is also responsible for retrying failed queries, canceling query requests, keeping connection heartbeats, and reporting query status to the management node.
-
-Management Node. The management node keeps the metadata of all data in the cluster, supplies clients with the metadata needed for queries, and divides query requests according to cluster load. A super table contains the information of all tables created from it, so the management node's query executor (Query Executor) handles tag (TAG) queries and returns the information of the tables matching the tag filter to the client. The management node also maintains the cluster's query status (Query Status Manager), keeping all currently running queries in memory; when a client issues the *show queries* command, information about the queries currently running in the system is returned to the client.
-
-Data Node. The data node stores all the data of the databases and schedules query execution through the query executor, the query scheduler, and the query task queue (Query Task Queue). Query requests received from clients are placed in the task queue; the query executor takes requests from the queue and executes them. The query optimizer (Query Optimizer) performs basic optimization, and the query executor (Query Executor) scans the qualifying data blocks and returns the computed results. The data node also responds to management information and commands from the management node; for example, after a *kill query* command, the running query task must be stopped immediately.
-
-
-Fig 1. Query processing architecture (only query-related components shown)
-
-### Regular Query Processing
-
-The client, the management node, and the data nodes cooperate to complete the whole query processing flow of TDengine. We use a concrete SQL query to walk through it. The statement queries the super table *FOO_SUPER_TABLE* for the total number of records, within the whole day of January 12, 2019, in the tables whose tag TAG_LOC is 'beijing':
-
-```sql
-SELECT COUNT(*)
-FROM FOO_SUPER_TABLE
-WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
-```
-
-First, the client invokes the TAOS SQL parser to parse and validate the statement, generates the syntax tree, and extracts the query object, the super table *FOO_SUPER_TABLE*. The parser then requests the corresponding metadata from the management node, sending the filter condition (TAG_LOC='beijing') along with the request.
-
-On receiving the metadata request, the management node first finds the basic information of the super table *FOO_SUPER_TABLE*, then applies the filter condition (TAG_LOC='beijing') to all tables created from it; finally its query executor returns to the client the metadata of the objects (tables or super table) that satisfy the condition.
-
-After the client obtains the metadata of *FOO_SUPER_TABLE*, the query executor sends query requests, based on the data distribution recorded in the metadata, to every node that holds relevant data; the timestamp filter (TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00') is sent to all of those data nodes.
-
-A data node receiving a query from a client converts it into an internal structure, optimizes it, and places it into the task queue to await execution by the query executor. When the result is obtained, it is returned to the client. Each data node executes its query independently, relying only on its own data and content.
-
-When all data nodes involved have returned their results, the client aggregates the result sets once more (in this example, by summing them); the accumulated value is the final result. This second-stage aggregation is not needed for every query; for example, a column-selection query needs no second-stage aggregation at all.
-
-### REST Query Processing
-
-Besides the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on the HTTP protocol. Unlike development with an application client, when the REST interface is used, all query processing is completed on the server side; the user's application takes no part in the database computation, and after processing the result is returned to the user in JSON format over HTTP.
-
-Fig 2. REST query architecture
-
-When the HTTP-based REST query interface is used, the HTTP request first establishes a connection with the HTTP connector on a data node, and then the REST signature mechanism uses a token to ensure the reliability of the request. On the data node, the HTTP connector receives the request and invokes the embedded client program to initiate query processing: the embedded client parses the SQL statement passed in through the HTTP connector, requests metadata from the management node as needed, sends query requests to the local node or to other nodes in the cluster, and aggregates the computation results as required. From the moment the HTTP connector receives the SQL request, the subsequent processing is exactly the same as query processing through an application client. Finally, the query result is converted into a JSON string and returned to the client in the HTTP response.
-
-Throughout the HTTP flow, the user application no longer participates in query processing; it is only responsible for sending the SQL request over HTTP and receiving the result in JSON format. Note also that every data node embeds an HTTP connector and a client program, so a request sent to any data node in the cluster can be answered by that node over HTTP.
-
-### Technical Features
-
-Because TDengine stores data and tags separately, the redundancy of tag storage is greatly reduced. Tag data is attached directly to each table and is managed and maintained in a fully in-memory structure, which provides fast query processing: tag queries over tens of millions of tables can return in milliseconds. Filtering on tags first effectively reduces the amount of data involved in the second query phase. To improve query performance further, and exploiting the immutable nature of IoT data, TDengine records statistics such as the maximum, minimum, and sum of the data in every stored data block. If a query covers all the data of a block, the pre-computed results are used directly and the block's content is never read. Since the pre-computed part is far smaller than the actual data on disk, using it can drastically reduce read IO and speed up query processing whenever disk IO is the bottleneck.
-
-TDengine stores data by column. When a data block is read from disk for computation, only the columns referenced by the query are read, minimizing the amount of data read. Moreover, thanks to the columnar layout, data scans operate on a column's data block and can make full use of the CPU L2 cache, greatly accelerating scanning. In addition, some queries return results before the complete result set is produced; for a column-selection query, for example, the data node returns the first batch of results to the client as soon as it is available. Also, as soon as a data node receives a query request, it immediately returns an acknowledgment to the client, starts the query, and responds to the user again only when execution completes.
-
-## TDengine Cluster Design
-
-### 1. The Cluster and Its Main Logical Units
-
-TDengine is designed on the assumption that hardware and software are unreliable and will fail, and that no single machine has enough capacity to process massive data. From day one, TDengine has therefore been designed as a distributed, highly reliable architecture: fully decentralized and horizontally scalable, so that the failure of one or several servers, or software errors, does not affect the service. Through node virtualization combined with automatic load balancing, TDengine makes maximum use of the computing and storage resources of a heterogeneous cluster. As long as the number of data replicas is greater than one, software or hardware upgrades, and even IDC migrations, can be done without stopping the cluster service, which keeps the system running and greatly reduces the workload of administrators and operators.
-
-The example diagram shows eight physical nodes, each logically divided into multiple virtual nodes. The basic concepts are introduced below.
-
-**Physical node (dnode)**: a physical server in the cluster, or a virtual machine on a cloud platform. For security and communication efficiency, a physical node can be configured with two NICs, or two IP addresses: one NIC for internal cluster communication, with IP address **privateIp**, and another for communication with applications outside the cluster, with IP address **publicIp**. On some cloud platforms (such as Alibaba Cloud), the external IP address is mapped, so publicIp has a corresponding internal address **internalIp** (different from privateIp). For a node with a single IP address, publicIp, privateIp, and internalIp are all the same address. One and only one taosd instance runs on a dnode.
-
-**Virtual data node (vnode)**: the basic, independently running logical unit on top of a physical node. Writing, storage, and querying of time-series data all happen inside virtual nodes (V in the figure); the collected time-series data is stored on vnodes. A vnode holds a fixed number of tables. When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes a physical node can host depends on its hardware resources. A vnode belongs to exactly one DB, but a DB can have multiple vnodes.
-
-**Virtual data node group (vgroup)**: vnodes located on different physical nodes can form a virtual data node group (for example, V0 on dnode0, V1 on dnode1, and V2 on dnode6 in the figure belong to the same group). Vnodes in the same vgroup are managed in master/slave fashion: writes can only go to the master, which replicates the data asynchronously to the slaves, ensuring that copies of the data exist on multiple physical nodes. If the master goes down, the other nodes detect it and elect a new master within the vgroup, which continues serving data requests, keeping the system reliable. The number of vnodes in a vgroup is the number of data replicas. If a DB has N replicas, the cluster must have at least N physical nodes. The replica count is set with the replica parameter at DB creation time and defaults to 1. With TDengine, data safety relies on multiple replicas, so expensive storage devices such as disk arrays are no longer needed.
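-
-As a hedged sketch of the replica parameter mentioned above (the database name is invented):
-
-```mysql
--- every vgroup of this database keeps 3 copies of the data,
--- so the cluster needs at least 3 physical nodes
-CREATE DATABASE demo REPLICA 3;
-```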
-
-**Virtual management node (mnode)**: monitors and maintains the running state of all nodes and balances the load among them (M in the figure). It also stores and manages the metadata (users, databases, tables, static tags, etc.), so it is also called the Meta Node. A TDengine cluster can be configured with multiple mnodes (at most 5), which automatically form a management node cluster (M0, M1, M2 in the figure). Mnodes are managed in master/slave fashion, with strongly consistent data synchronization. The mnode cluster is created automatically by the system, without manual intervention. Each dnode hosts at most one mnode, and every dnode knows the IP addresses of all mnodes in the cluster.
-
-**taosc**: a software module, the driver TDengine provides to applications, embedded in the JDBC and ODBC drivers and in the C connection library. Applications interact with the cluster through taosc rather than directly. This module fetches and caches metadata, forwards insert and query requests to the correct virtual nodes, and performs the final level of aggregation, sorting, and filtering before returning results to the application. For the JDBC, ODBC, and C/C++ interfaces, this module runs on the application's machine and consumes very few resources. To support the fully distributed REST interface, a taosc instance runs on every dnode of the TDengine cluster.
-
-**External service address**: a TDengine cluster can hold one, several, or even thousands of physical nodes. An application only needs to connect to the publicIp of any physical node in the cluster. When starting the CLI application taos, the -h option takes exactly this publicIp.
-
-**master/secondIp**: every dnode must be configured with a masterIp. After startup, a dnode sends a join-cluster request to the configured masterIp. The masterIp is the privateIp of any node in the already created cluster; for the first node of a cluster, it is its own privateIp. To make sure the connection succeeds, each dnode can also be configured with a secondIp, likewise the privateIp of any node in the existing cluster. If a node fails to connect to the masterIp, it tries the secondIp.
-
-After startup, a dnode obtains the list of mnode IP addresses in the cluster and periodically sends status information to the mnodes.
-
-The vnode and mnode are only logical divisions; they are just different threads of the taosd executable, requiring no separate software installation or special configuration. The minimal system configuration is a single physical node, with vnode, mnode, and taosc all present and running normally, although a single node cannot guarantee high reliability.
-
-### 2. A Typical Operation Flow
-
-To explain the relationship between vnode, mnode, taosc, and the application, and the role each plays, we walk through the typical operation of writing data.
-
-1. The application initiates an insert request through JDBC, ODBC, or another API.
-2. taosc checks its cache for the table's metadata. If present, go straight to step 4. If not, taosc sends a get-meta-data request to an mnode.
-3. The mnode returns the table's metadata to taosc. The metadata contains the table's schema and its vgroup information (the vnode IDs and the IP addresses of their dnodes; with N replicas there are N vnodeID/IP pairs). If taosc gets no response from the mnode for a long time and multiple mnodes exist, it sends the request to the next mnode.
-4. taosc sends the insert request to the master vnode.
-5. After inserting the data, the vnode acknowledges to taosc that the insert succeeded. If taosc gets no response from the vnode for a long time, it treats that node as offline; in that case, if the database being written has multiple replicas, taosc sends the insert request to the next vnode in the vgroup.
-6. taosc notifies the application that the write succeeded.
-
-For steps 2 and 3: when taosc starts, it does not know the mnode's IP address, so it sends the request to the configured external service IP of the cluster. If the dnode receiving the request has no mnode configured, it replies with the list of mnode IP addresses (with multiple dnodes there can be several), and taosc re-sends the get-meta-data request to one of those mnodes.
-
-For steps 4 and 5: without cached information, taosc does not know which member of the vgroup is the master, so it assumes the first vnodeID/IP is the master and sends the request there. If that vnode is not the master, it replies with the identity of the master, and taosc re-sends the request to the suggested master vnode. Once the insert succeeds, taosc caches the master's information.
-
-The flow above describes inserting data; the flow for queries and computations is exactly the same. taosc encapsulates and hides all of these complications, so the application does not need to deal with redirection, metadata fetching, or other details; it is completely transparent.
-
-Thanks to taosc's caching, the mnode is accessed only for the first operation on a table, so the mnode never becomes a bottleneck. But because schemas may change and vgroups may move (for example, when load balancing happens), taosc refreshes its cache periodically.
-
-### 3. Data Partitioning
-
-Vnodes (virtual data nodes) store the collected time-series data, and queries and computations run on them. For load balancing, data recovery, and support of heterogeneous environments, TDengine splits a physical node into multiple vnodes according to its computing and storage resources. These vnodes are managed automatically by TDengine and are completely transparent to the application.
-
-For a single data collection point, however large its data volume, one vnode (or one vgroup, if the replica count is greater than 1) has enough computing and storage resources to handle it (at one 16-byte record per second, a year of raw data is under 0.5 GB). TDengine therefore stores all data of one table in one vnode, never spreading one collection point's data across two or more dnodes. A vnode can hold data of multiple tables; the number of tables a vnode can hold is set by the configuration parameter tables, 2000 by default. By design, all tables in a vnode belong to the same DB, so the number of vnodes or vgroups a database needs equals: number of tables in the database / tables.
-
-When a DB is created, the system does not allocate resources immediately. When a table is created, the system checks whether an already allocated vnode with free slots exists; if so, the table is created in that vnode right away. If not, the system creates a new vnode on some dnode, chosen based on the current load of the cluster, and then creates the table. If the DB has multiple replicas, the system creates not a single vnode but a vgroup (virtual data node group). TDengine puts no limit on the number of vnodes; it is bounded only by the computing and storage resources of the physical nodes.
-
-The tables parameter should be set with the concrete scenario in mind and can be customized per DB at creation time. It should be neither too large nor too small. Too small, in the extreme one vnode per collection point, results in too many data files; too large, and the benefits of virtualization are lost. Given the cluster's computing resources, the total number of vnodes in the system should be more than twice the number of CPU cores.
-
-### 4. Load Balancing
-
-Every dnode (physical node) periodically reports its status (disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so the mnode knows the state of the whole cluster. Based on this overall state, when the mnode finds a dnode overloaded, it moves one or more vnodes from that dnode to other dnodes. During the move, the service continues: inserts, queries, and computations are unaffected. After load balancing finishes, applications do not need to restart; they connect to the new vnode automatically.
-
-If the mnode receives no status report from a dnode for some time, it considers that dnode offline. If the offline period exceeds a threshold (set by the configuration parameter offlineThreshold), the dnode is forcibly removed from the cluster by the mnode. For the vnodes on that dnode, if the replica count is greater than one, the system automatically creates new replicas on other dnodes to maintain the replica count.
-
-**Note:** the clustering feature is currently limited to the enterprise edition.
diff --git a/documentation20/webdocs/markdowndocs/More on System Architecture.md b/documentation20/webdocs/markdowndocs/More on System Architecture.md
deleted file mode 100644
index d7a38b99a3ae5a630509f3ef0f0ffdc97d3aaaf1..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/More on System Architecture.md
+++ /dev/null
@@ -1,176 +0,0 @@
-# TDengine System Architecture
-
-## Storage Design
-
-TDengine data mainly include **metadata** and **data**, which we will introduce in the following sections.
-
-### Metadata Storage
-
-Metadata include the information of databases, tables, etc. Metadata files are saved in _/var/lib/taos/mgmt/_ directory by default. The directory tree is as below:
-```
-/var/lib/taos/
- +--mgmt/
- +--db.db
- +--meters.db
- +--user.db
- +--vgroups.db
-```
-
-A metadata structure (database, table, etc.) is saved as a record in a metadata file. All metadata files are appended only, and even a drop operation adds a deletion record at the end of the file.
-
-### Data Storage
-
-Data in TDengine are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same file group, such as files v0f1804*. This sharding strategy can effectively improve data searching speed. By default, a group of files contains data of 10 days, which can be configured by *daysPerFile* in the configuration file or by the *DAYS* keyword in the *CREATE DATABASE* clause. Data in files are organized in blocks. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp, which helps to improve the compression rate and save storage. The compression algorithms used in TDengine include simple8B, delta-of-delta, RLE, LZ4, etc.
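-
-As a concrete illustration of the *DAYS* option mentioned above, a database whose file groups each cover 10 days could be created as follows (the database name is invented for the example):
-
-```mysql
-CREATE DATABASE demo DAYS 10;
-```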
-
-By default, TDengine data are saved in the */var/lib/taos/data/* directory, while the _/var/lib/taos/tsdb/_ directory contains the vnode information and data file links.
-
-```
-/var/lib/taos/
- +--tsdb/
- | +--vnode0
- | +--meterObj.v0
- | +--db/
- | +--v0f1804.head->/var/lib/taos/data/vnode0/v0f1804.head1
- | +--v0f1804.data->/var/lib/taos/data/vnode0/v0f1804.data
- | +--v0f1804.last->/var/lib/taos/data/vnode0/v0f1804.last1
- | +--v0f1805.head->/var/lib/taos/data/vnode0/v0f1805.head1
- | +--v0f1805.data->/var/lib/taos/data/vnode0/v0f1805.data
- | +--v0f1805.last->/var/lib/taos/data/vnode0/v0f1805.last1
- | :
- +--data/
- +--vnode0/
- +--v0f1804.head1
- +--v0f1804.data
- +--v0f1804.last1
- +--v0f1805.head1
- +--v0f1805.data
- +--v0f1805.last1
- :
-```
-
-#### meterObj file
-There is only one meterObj file in a vnode. Information about the vnode, such as its creation time, configuration, and vnode statistics, is saved in this file. It has the structure below:
-
-```
-
-[file_header]
-[table_record1_offset&length]
-[table_record2_offset&length]
-...
-[table_recordN_offset&length]
-[table_record1]
-[table_record2]
-...
-[table_recordN]
-
-```
-The file header takes 512 bytes and mainly contains information about the vnode. Each table record is the representation of a table on disk.
-
-#### head file
-The _head_ files contain the index of data blocks in the _data_ file. The inner organization is as below:
-```
-
-[file_header]
-[table1_offset]
-[table2_offset]
-...
-[tableN_offset]
-[table1_index_block]
-[table2_index_block]
-...
-[tableN_index_block]
-
-```
-The table offset array in the _head_ file saves the information about the offsets of each table index block. Indices on data blocks in the same table are saved continuously. This also makes it efficient to load data indices on the same table. The data index block has a structure like:
-
-```
-[index_block_info]
-[block1_index]
-[block2_index]
-...
-[blockN_index]
-```
-The index block info part contains the information about the index block such as the number of index blocks, etc. Each block index corresponds to a real data block in the _data_ file or _last_ file. Information about the location of the real data block, the primary timestamp range of the data block, etc. are all saved in the block index part. The block indices are sorted in ascending order according to the primary timestamp. So we can apply algorithms such as the binary search on the data to efficiently search blocks according to time.
-
-#### data file
-The _data_ files store the real data block. They are append-only. The organization is as:
-```
-
-[file_header]
-[block1]
-[block2]
-...
-[blockN]
-
-```
-A data block in _data_ files only belongs to a table in the vnode and the records in a data block are sorted in ascending order according to the primary timestamp key. Data blocks are column-oriented. Data in the same column are stored contiguously, which improves reading speed and compression rate because of their similarity. A data block has the following organization:
-
-```
-[column1_info]
-[column2_info]
-...
-[columnN_info]
-[column1_data]
-[column2_data]
-...
-[columnN_data]
-```
-The column info part includes information about the column types, the column compression algorithm, the column data offset and length in the _data_ file, etc. Besides, pre-calculated results of the column data in the block are also kept in the column info part, which helps to improve reading speed by avoiding loading the data block when it is not needed.
-
-#### last file
-To avoid storage fragmentation and to improve query speed and compression rate, TDengine introduces an extra file, the _last_ file. When the number of records in a data block is lower than a threshold, TDengine will flush the block to the _last_ file for temporary storage. When new data comes, the data in the _last_ file will be merged with the new data to form a larger data block, which is then written to the _data_ file. The organization of the _last_ file is similar to that of the _data_ file.
-
-### Summary
-The innovation in the architecture and storage design of TDengine improves resource usage. On the one hand, virtualization makes it easy to distribute resources between different vnodes and to scale in the future. On the other hand, sorted and column-oriented storage gives TDengine a great advantage in writing, querying and compression.
-
-## Query Design
-
-#### Introduction
-
-TDengine provides a variety of query functions for both tables and super tables. In addition to regular aggregate queries, it also provides time-window based queries and statistical aggregation for time-series data. TDengine's query processing requires the client app, the management node, and the data nodes to work together. The query-related functions and modules included in each component are as follows:
-
-Client (Client App). The client development kit, embedded in a client application, consists of the TAOS SQL parser, the query executor, the second-stage aggregator (Result Merger), the continuous query manager, and other major functional modules. The SQL parser is responsible for parsing and verifying the SQL statement and converting it into an abstract syntax tree. The query executor is responsible for transforming the abstract syntax tree into the query execution logic and creates the metadata query according to the query condition of the SQL statement. Since TAOS SQL does not currently include complex nested queries or a pipelined query processing mechanism, there is no need for query plan optimization or physical query plan conversions. The second-stage aggregator performs, at the client side, the aggregation of the independent results returned by the data nodes involved in a query, to generate the final results. The continuous query manager is dedicated to managing the continuous queries created by users, including issuing fixed-interval query requests and writing the results back to TDengine or returning them to the client application as needed. The client is also responsible for retrying after a query fails, canceling the query request, maintaining the connection heartbeat, and reporting the query status to the management node.
-
-Management Node. The management node keeps the metadata of all the data of the entire cluster system, provides the metadata required for queries to the client node, and divides query requests according to the load condition of the cluster. A super table contains information about all the tables created from it, so the query processor (Query Executor) of the management node is responsible for processing queries on the tags of tables and returns the table information satisfying the tag query. Besides, the management node maintains the query status of the cluster in the Query Status Manager component, in which the metadata of all currently executing queries is temporarily kept in an in-memory buffer. When the client issues the *show queries* command to the management node, information about the currently running queries is returned to the client.
-
-Data Node. The data node, responsible for storing all data of the database, consists of the query executor, the query processing scheduler, the query task queue, and other related components. Once query requests from the client are received, they are put into the query task queue, waiting to be processed by the query executor. The query executor extracts a query request from the queue and invokes the query optimizer to perform basic optimization of the query execution plan. It then scans the qualified data blocks in both cache and disk to obtain the qualified data and returns the calculated results. Besides, the data node also needs to respond to management information and commands from the management node. For example, after the *kill query* command is received from the management node, the query task must be stopped immediately.
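-
-The *show queries* and *kill query* commands mentioned above are issued from the TDengine shell. A hedged sketch (the exact ID format reported by SHOW QUERIES may vary by version):
-
-```mysql
-SHOW QUERIES;          -- list the queries currently running in the cluster
-KILL QUERY <query-id>; -- stop one of them, using the ID reported above
-```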
-
-
-Fig 1. System query processing architecture diagram (only query related components)
-
-#### Query Process Design
-
-The client, the management node, and the data node cooperate to complete the entire query processing of TDengine. Let's take a concrete SQL query as an example to illustrate the whole query processing flow. The SQL statement is to query on super table *FOO_SUPER_TABLE* to get the total number of records generated on January 12, 2019, from the table, of which TAG_LOC equals to 'beijing'. The SQL statement is as follows:
-
-```sql
-SELECT COUNT(*)
-FROM FOO_SUPER_TABLE
-WHERE TAG_LOC = 'beijing' AND TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00'
-```
-
-First, the client invokes the TAOS SQL parser to parse and validate the SQL statement, then generates a syntax tree and extracts the object of the query, the super table *FOO_SUPER_TABLE*. The parser then sends a request with the filtering information (TAG_LOC='beijing') to the management node to get the corresponding metadata about *FOO_SUPER_TABLE*.
-
-Once the management node receives the request for metadata acquisition, it first finds the basic information of the super table *FOO_SUPER_TABLE*, then applies the query condition (TAG_LOC='beijing') to filter all the related tables created from it. Finally, the query executor returns the metadata information that satisfies the query request to the client.
-
-After the client obtains the metadata information of *FOO_SUPER_TABLE*, the query executor initiates query requests with the timestamp range filtering condition (TS >= '2019-01-12 00:00:00' AND TS < '2019-01-13 00:00:00') to all nodes that hold the corresponding data, according to the information about data distribution in the metadata.
-
-The data node receives the query sent from the client, converts it into an internal structure, and puts it into the query task queue to be executed by the query executor after the execution plan is optimized. When the query result is obtained, it is returned to the client. It should be noted that the data nodes perform the query process independently of each other, relying solely on their own data and content for processing.
-
-When all data nodes involved in the query return results, the client aggregates the result sets from each data node. In this case, all results are accumulated to generate the final query result. The second stage of aggregation is not always required for all queries. For example, a column selection query does not require a second-stage aggregation at all.
-
-#### REST Query Process
-
-In addition to the C/C++, Python, and JDBC interfaces, TDengine also provides a REST interface based on the HTTP protocol, which differs from development using the client application programming interface. When the user uses the REST interface, all the query processing is completed on the server side, and the user's application is not involved in query processing anymore. After the query processing is completed, the result is returned to the client as a JSON string over HTTP.
-
-
-Fig. 2 REST query architecture
-
-When a client uses the HTTP-based REST query interface, it first establishes a connection with the HTTP connector at a data node and then uses a token to ensure the reliability of the request through the REST signature mechanism. For the data node, after receiving the request, the HTTP connector invokes the embedded client program to initiate query processing; the embedded client parses the SQL statement from the HTTP connector and requests the management node for metadata as needed. After that, the embedded client sends query requests to the same data node or to other nodes in the cluster and aggregates the calculation results on demand. Finally, the query result is converted into a JSON format string and returned to the client via an HTTP response. After the HTTP connector receives the SQL request, the subsequent processing is completely consistent with query processing that uses the client application development kit.
-
-It should be noted that during the entire processing, the client application is no longer involved; it is only responsible for sending SQL requests through the HTTP protocol and receiving results in JSON format. Besides, each data node is embedded with an HTTP connector and a client, so a client may send its request to any data node in the cluster; that data node initiates the query, transfers sub-requests to other data nodes when necessary, and returns the result to the client through the HTTP protocol.
-
-#### Technology
-
-Because TDengine stores data and tag values separately, the tag values are kept in the management node and directly associated with each table instead of with records, resulting in a great reduction of data storage. Therefore, the tag values can be managed by a fully in-memory structure. First, filtering on the tag data can drastically reduce the data size involved in the second phase of the query. The query processing of the data itself is performed at the data nodes. TDengine takes advantage of the immutable characteristics of IoT data by calculating the maximum, minimum, and other statistics of the data in each saved data block, to effectively improve the performance of query processing. If the query involves all the data of an entire data block, the pre-computed result is used directly, and the content of the data block is no longer needed. Since the disk space required to store the pre-computation results is much smaller than the size of the data itself, the pre-computation results can greatly reduce disk IO and speed up query processing.
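-
-To make the pre-computation idea concrete, here is a hedged example (the table and column names are borrowed from the STable documentation elsewhere in this manual): for an aggregate query such as the one below, any data block that falls entirely inside the time range can contribute its pre-computed maximum without being read from disk.
-
-```mysql
--- fully covered blocks are answered from their pre-computed statistics;
--- only blocks partially covered by the time range are read from disk
-SELECT MAX(degree) FROM thermometer
-WHERE ts >= '2019-01-12 00:00:00' AND ts < '2019-01-13 00:00:00';
-```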
-
-TDengine employs column-oriented data storage techniques. When a data block needs to be loaded from the disk for calculation, only the required columns are read according to the query condition, so the read overhead is minimized. The data of one column is stored in a contiguous memory block and can therefore make full use of the CPU L2 cache to greatly speed up data scanning. Besides, TDengine utilizes an eagerly responding mechanism and returns partial results before the complete result is acquired. For example, in the case of a column-selection query, the data node immediately returns the first batch of results to the client as soon as it is obtained.
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/Super Table-ch.md b/documentation20/webdocs/markdowndocs/Super Table-ch.md
deleted file mode 100644
index e5c77471570a76e608d59a0dca10462315460337..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Super Table-ch.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# STable (Super Table): Multi-Table Aggregation
-
-TDengine requires a separate table for each data collection point. This greatly improves insert and query performance, but it makes the number of tables in the system grow sharply and makes table maintenance, aggregation, and statistics harder for applications. To reduce application development complexity, TDengine introduces the concept of the super table, STable (Super Table).
-
-## What is a Super Table
-
-An STable is an abstraction of one type of data collection point, a collection of collection instances of the same type, containing multiple child tables with identical schemas. An STable defines, for its child tables, a table schema and a set of tags: the schema consists of the data columns recorded in the table and their data types; the tag names and types are defined by the STable, while the tag values record the static information of each child table and are used to group and filter child tables. A child table is essentially an ordinary table, composed of a timestamp primary key and several data columns, with each row recording concrete data, and queries on it work exactly as on ordinary tables; the difference is that every child table belongs to one STable and carries a set of tag values defined by that STable. One STable can be defined for each type of collection device. The data model defines the type of each data column, such as temperature, pressure, voltage, current, or real-time GPS position, while the tag information belongs to the metadata, such as the collection device's serial number, model, or location; it is static and constitutes the table's metadata. When creating a table (a data collection point), the user specifies its STable (the collection type) as well as the tag values, which can also be added or modified later.
-
-TDengine extends standard SQL to define an STable, using the keyword tags to specify the tag information. The syntax is:
-
-```mysql
-CREATE TABLE <stable_name> (<ts_name> TIMESTAMP, field_name1 field_type, …) TAGS(tag_name tag_type, …)
-```
-
-Here tag_name is the tag's name and tag_type its data type. A tag can use any data type supported by TDengine except timestamp; there can be at most 6 tags, and a tag's name must differ from system keywords and from other column names. For example:
-
-```mysql
-create table thermometer (ts timestamp, degree float)
-tags (location binary(20), type int)
-```
-
-The SQL above creates an STable named thermometer with the tags location and type.
-
-When creating a table for a collection point, you can specify the STable it belongs to and the tag values. The syntax is:
-
-```mysql
-CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1, ...)
-```
-
-Continuing the thermometer example above, the statement creating a data table for a single thermometer from the super table thermometer is:
-
-```mysql
-create table t1 using thermometer tags ('beijing', 10)
-```
-
-The SQL above creates a table named t1 using thermometer as the template: t1's schema is exactly thermometer's schema, with the tag location set to 'beijing' and the tag type set to 10.
-
-A user can create an unlimited number of tables with different tags from one STable; in this sense, an STable is a set of tables with the same data model but different tags. As with ordinary tables, users can create, drop, and inspect STables, and most query operations that apply to ordinary tables, including all aggregation and projection/selection functions, also apply to STables. In addition, a tag filter condition can be set so that an aggregate query covers only part of the STable's tables, which greatly simplifies application development.
-
-TDengine indexes a table's primary key (the timestamp) and for now provides no index on the other collected quantities in the data model (such as temperature or pressure). Each data collection point produces many data records, but each point's tags form just a single record, so the tag data adds no storage redundancy and its total size is limited. TDengine stores the tag data completely separately from the collected dynamic data and builds a high-performance in-memory index structure on STable tags, providing fast support for all tag operations; users can create, retrieve, update, and delete (CRUD) tags as needed.
-
-An STable belongs to a database; an STable belongs to exactly one database, a database can have one or more STables, and an STable can have many child tables.
-
-## Managing Super Tables
-
-- Create a super table
-
- ```mysql
- CREATE TABLE <stable_name> (<ts_name> TIMESTAMP, field_name1 field_type, …) TAGS(tag_name tag_type, …)
- ```
-
- The syntax is similar to creating an ordinary table, but the names and types of the TAGS fields must be specified.
-
- Notes:
-
- 1. The total length of the TAGS columns cannot exceed 512 bytes;
- 2. The data type of a TAGS column cannot be timestamp or nchar;
- 3. A TAGS column name cannot be the same as any other column name;
- 4. A TAGS column name cannot be a reserved keyword.
-
-- Show the created super tables
-
- ```mysql
- show stables;
- ```
-
- Lists all STables in the current database and their information, including name, creation time, number of columns, number of tags (TAGs), and the number of tables created from each STable.
-
-- Drop a super table
-
- ```mysql
- DROP TABLE <stable_name>
- ```
-
- Note: dropping an STable does not cascade to the tables created from it; on the contrary, an STable can be dropped only after all tables created from it have been dropped.
-
-- List the tables of an STable that satisfy filter conditions
-
- ```mysql
- SELECT TBNAME, [TAG_NAME, …] FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>]> <tag_value> [AND|OR] …
- ```
-
- Lists the tables that belong to the STable and satisfy the filter conditions. Note: TBNAME is a keyword that shows the names of the child tables created from the STable; conditions on tags can be used in the query.
-
- ```mysql
- SELECT COUNT(TBNAME) FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>]> <tag_value> [AND|OR] …
- ```
-
- Counts the child tables that belong to the STable and satisfy the filter conditions.
-
-## Automatic Child Table Creation on Write
-
-In some special scenarios, the writer does not know whether a device's table already exists. In that case, the auto-create syntax can be used so that a non-existent child table is created automatically on write with the schema defined by the super table; if the table already exists, no new table is created. Note: auto-create statements can only create child tables, not super tables, so the super table must already be defined. The auto-create syntax is very similar to insert/import; the only difference is that the statement carries the super table and tag information. The syntax is:
-
-```mysql
-INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ...;
-```
-
-Inserts one or more records into the table tb_name. If tb_name does not exist, a new table of that name is created with the schema defined by the super table stb_name and the tag values given (tag1_value, ...), and the values are written into it. If tb_name already exists, table creation is skipped; the system does not check whether tb_name's tags match the given tag values, i.e., the tags of an existing table are not updated.
-
-```mysql
-INSERT INTO <tb1_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (<field_value>, ...) (<field_value>, ...) ... <tb2_name> USING <stb_name> TAGS (<tag2_value>, ...) VALUES (<field_value>, ...) ...;
-```
-
-Inserts one or more records into multiple tables tb1_name, tb2_name, etc., each specifying its own super table for automatic creation; a concrete sketch follows.
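-
-Continuing the thermometer example used later in this document, a hedged concrete sketch (the table therm5 and its values are invented): the first write creates the child table automatically, and later writes find it already present.
-
-```mysql
--- therm5 does not exist yet: it is created from thermometer
--- with the given tag values, then the record is written
-INSERT INTO therm5 USING thermometer TAGS ('beijing', 2) VALUES ('2018-01-01 00:00:00.000', 22);
-```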
-
-## Managing the Tags of an STable
-
-Except for changing a tag's value, which is performed on a child table, all other tag operations (adding a tag, dropping a tag, etc.) apply only to the STable and cannot be performed on an individual child table. After a tag is added to an STable, all tables created from it automatically gain the new tag; for numeric tags, the default value of the new tag is 0.
-
-- Add a new tag
-
- ```mysql
- ALTER TABLE <stable_name> ADD TAG <new_tag_name> <tag_type>
- ```
-
- Adds a new tag to the STable and specifies the new tag's type. The total number of tags cannot exceed 6.
-
-- Drop a tag
-
- ```mysql
- ALTER TABLE <stable_name> DROP TAG <tag_name>
- ```
-
- Drops a tag from the super table; after a tag is dropped from the super table, all its child tables automatically drop that tag as well.
-
- Note: the first tag cannot be dropped; an STable must keep at least one tag.
-
-- Rename a tag
-
- ```mysql
- ALTER TABLE <stable_name> CHANGE TAG <old_tag_name> <new_tag_name>
- ```
-
- Renames a tag of the super table; after the rename, all its child tables automatically pick up the new tag name.
-
-- Change a child table's tag value (see the sketch after this list)
-
- ```mysql
- ALTER TABLE <table_name> SET TAG <tag_name>=<new_tag_value>
- ```
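-
-A hedged concrete sketch using the thermometer tables from the usage example below (the new tag name and the values are invented):
-
-```mysql
--- add a tag to the super table; every child table gains it with default 0
-ALTER TABLE thermometer ADD TAG deviceid INT;
--- change one child table's tag value
-ALTER TABLE therm1 SET TAG location='tianjin';
-```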
-
-## Multi-Table Aggregation via STable
-
-Aggregate queries can run over all child tables created from an STable, with filtering on any tag values, and the results can be grouped by the values in TAGS; fuzzy-match filtering on binary tags is not yet supported. The syntax is:
-
-```mysql
-SELECT function, …
-   FROM <stable_name>
-   WHERE <tag_name> <[=|<=|>=|<>]> <tag_value> [AND|OR] …
-   INTERVAL (<time range> [, offset])
-   GROUP BY <tag_name>, …
-   ORDER BY <tag_name> <asc|desc>
-   SLIMIT <group_limit>
-   SOFFSET <group_offset>
-   LIMIT <record_limit>
-   OFFSET <record_offset>
-```
-
-**Notes**:
-
-For super table aggregate queries, TDengine currently supports the following aggregation/selection functions: sum, count, avg, first, last, min, max, top, and bottom, as well as projection over all or some columns, used exactly as in single-table queries. Other kinds of aggregation and arithmetic operations are not yet supported, and none of the functions or computations can currently be nested.
-
-A query without GROUP BY aggregates over time all the STable's tables that satisfy the filter conditions; the output is sorted by timestamp in ascending order by default, and ORDER BY _c0 ASC|DESC selects ascending or descending timestamp order. An aggregate query with GROUP BY groups the data by tags, aggregates each group separately, and outputs the per-group results; the order among groups can be set with ORDER BY, and within each group the time series is monotonically increasing.
-
-Use SLIMIT/SOFFSET for paging across groups, i.e., the maximum number of groups in the result set and the starting group; use LIMIT/OFFSET for paging within a group, i.e., at most how many records each group outputs and the starting record.
-
-## STable Usage Example
-
-We demonstrate STable usage with time-series data collected by temperature sensors. In this example, a table is created for each thermometer and named by the thermometer's ID; the sampling time is recorded as ts and the sampled value as degree. Tags record each collector's region and type so that later queries are convenient. All thermometers collect the same quantities, so we define the schema with an STable.
-
-### Define the STable Schema and Create Child Tables from It
-
-The statement creating the STable is:
-
-```mysql
-CREATE TABLE thermometer (ts timestamp, degree double)
-TAGS(location binary(20), type int)
-```
-
-Suppose there are 4 collectors across the three regions Beijing, Tianjin, and Shanghai, and 3 types of temperature collectors; we can then create a table for each collector as follows:
-
-```mysql
-CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1);
-CREATE TABLE therm2 USING thermometer TAGS ('beijing', 2);
-CREATE TABLE therm3 USING thermometer TAGS ('tianjin', 1);
-CREATE TABLE therm4 USING thermometer TAGS ('shanghai', 3);
-```
-
-Here therm1, therm2, therm3, and therm4 are four concrete child tables of the super table thermometer, i.e., ordinary tables. Taking therm1 as an example, it holds the data collected by thermometer therm1; its schema is fully defined by thermometer, and its tags location="beijing", type=1 mean that therm1 is in the Beijing region and is a type-1 thermometer.
-
-### Write Data
-
-Note that data cannot be written through the STable directly; writes must target each child table. We write one record into each of the four tables therm1, therm2, therm3, and therm4:
-
-```mysql
-INSERT INTO therm1 VALUES ('2018-01-01 00:00:00.000', 20);
-INSERT INTO therm2 VALUES ('2018-01-01 00:00:00.000', 21);
-INSERT INTO therm3 VALUES ('2018-01-01 00:00:00.000', 24);
-INSERT INTO therm4 VALUES ('2018-01-01 00:00:00.000', 23);
-```
-
-### Aggregate Query by Tags
-
-Query the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) of the sensors located in the Beijing (beijing) and Tianjin (tianjing) regions, grouping the results by region (location) and sensor type (type).
-
-```mysql
-SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
-FROM thermometer
-WHERE location='beijing' or location='tianjing'
-GROUP BY location, type
-```
-
-### Aggregate Query by Time Window
-
-Query, for the sensors located outside the Beijing region, the number of samples count(*), average temperature avg(degree), maximum temperature max(degree), and minimum temperature min(degree) over the last 24 hours, aggregating the samples in 10-minute windows and grouping the results again by region (location) and sensor type (type).
-
-```mysql
-SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
-FROM thermometer
-WHERE location<>'beijing' and ts>=now-1d
-INTERVAL(10M)
-GROUP BY location, type
-```
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/Super Table.md b/documentation20/webdocs/markdowndocs/Super Table.md
deleted file mode 100644
index a213567f6d67ed351fac67b821f4db1929fa3a22..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/Super Table.md
+++ /dev/null
@@ -1,195 +0,0 @@
-# STable: Super Table
-
-"One Table for One Device" design can improve the insert/query performance significantly for a single device. But it has a side effect, the aggregation of multiple tables becomes hard. To reduce the complexity and improve the efficiency, TDengine introduced a new concept: STable (Super Table).
-
-## What is a Super Table
-
-An STable is an abstraction and a template for a type of device. An STable contains a set of devices (tables) that have the same schema or data structure. Besides the shared schema, an STable has a set of tags, like the model, serial number and so on. Tags are used to record the static attributes of the devices and to group a set of devices (tables) for aggregation. Tags are metadata of a table and can be added, deleted or changed.
-
-TDengine does not save tags as a part of the data points collected. Instead, tags are saved as metadata. Each table has a set of tags. To improve query performance, tags are all cached and indexed. One table can only belong to one STable, but one STable may contain many tables.
-
-Like a table, you can create, show, delete and describe STables. Most query operations on tables can be applied to STables too, including the aggregation and selector functions. For queries on an STable with no tag filter, the operations are applied to all the tables created via this STable. If there is a tag filter, the operations are applied only to the subset of tables that satisfy the tag filter conditions. Using tags to put devices into different groups for aggregation is very convenient.
-
-## Create a STable
-
-Similar to creating a standard table, the syntax is:
-
-```mysql
-CREATE TABLE <stable_name> (<ts_name> TIMESTAMP, field_name1 field_type, …) TAGS(tag_name tag_type, …)
-```
-
-A new keyword "tags" is introduced, where tag_name is the tag name and tag_type is the associated data type.
-
-Note:
-
-1. The bytes of all tags together shall be less than 512
-2. Tag's data type can not be time stamp or nchar
-3. Tag name shall be different from the field name
-4. Tag name shall not be the same as system keywords
-5. Maximum number of tags is 6
-
-For example:
-
-```mysql
-create table thermometer (ts timestamp, degree float)
-tags (location binary(20), type int)
-```
-
-The above statement creates a STable thermometer with two tags, "location" and "type".
-
-## Create a Table via STable
-
-To create a table for a device, you can use a STable as its template and assign the tag values. The syntax is:
-
-```mysql
-CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1, ...)
-```
-
-You can create any number of tables via a STable, and each table may have different tag values. For example, the following creates five tables via the STable thermometer:
-
-```mysql
- create table t1 using thermometer tags ('beijing', 10);
- create table t2 using thermometer tags ('beijing', 20);
- create table t3 using thermometer tags ('shanghai', 10);
- create table t4 using thermometer tags ('shanghai', 20);
- create table t5 using thermometer tags ('new york', 10);
-```
-
-## Aggregate Tables via STable
-
-You can group a set of tables together by specifying the tags filter condition, then apply the aggregation operations. The result set can be grouped and ordered based on tag value. Syntax is:
-
-```mysql
-SELECT function, …
-   FROM <stable_name>
-   WHERE <tag_name> <[=|<=|>=|<>]> <tag_value> [AND|OR] …
-   INTERVAL (<time range>)
-   GROUP BY <tag_name>, …
-   ORDER BY <tag_name> <asc|desc>
-   SLIMIT <group_limit>
-   SOFFSET <group_offset>
-   LIMIT <record_limit>
-   OFFSET <record_offset>
-```
-
-For the time being, STable supports only the following aggregation/selection functions: *sum, count, avg, first, last, min, max, top, bottom*, and the projection operations, with the same syntax as a standard table. Arithmetic operations are not supported, and neither are nested queries.
-
-*INTERVAL* is used for the aggregation over a time range.
-
-If *GROUP BY* is not used, the aggregation is applied to all the selected tables, and the result set is output in ascending order of the timestamp, but you can use "*ORDER BY _c0 ASC|DESC*" to specify the order you like.
-
-If *GROUP BY tag_name* is used, the aggregation is applied to groups based on tags. Each group is aggregated independently. The result set is a group of aggregation results. The group order is decided by *ORDER BY tag_name*. Inside each group, the result set is in the ascending order of the timestamp.
-
-*SLIMIT/SOFFSET* are used to limit the number of groups and starting group number.
-
-*LIMIT/OFFSET* are used to limit the number of records in a group and the starting rows.
-
-### Example 1:
-
-Check the number of records, average, maximum, and minimum temperatures of Beijing and Tianjin, and group the result set by location and type. The SQL statement shall be:
-
-```mysql
-SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
-FROM thermometer
-WHERE location='beijing' or location='tianjing'
-GROUP BY location, type
-```
-
-### Example 2:
-
-List the number of records, average, maximum, and minimum temperature every 10 minutes for the past 24 hours for all the thermometers located in Beijing with type 10. The SQL statement shall be:
-
-```mysql
-SELECT COUNT(*), AVG(degree), MAX(degree), MIN(degree)
-FROM thermometer
-WHERE location='beijing' and type=10 and ts>=now-1d
-INTERVAL(10M)
-```
-
-## Create Table Automatically
-
-An insert operation will fail if the table is not created yet. But for an STable, TDengine can create the table automatically if the application provides the STable name, the table name and the tag values when inserting data points. The syntax is:
-
-```mysql
-INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (field_value, ...) (field_value, ...) ... <tb_name2> USING <stb_name2> TAGS (<tag1_value2>, ...) VALUES (<field_value>, ...) ...;
-```
-
-When inserting data points into table tb_name, the system will check if table tb_name is created or not. If it is already created, the data points will be inserted as usual. But if the table is not created yet, the system will create the table tb_name using the STable stb_name as the template, with the given tags. Multiple tables can be specified in one SQL statement, as shown in the sketch below.
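-
-A hedged concrete sketch based on the thermometer STable defined earlier (the table t6 and its values are invented):
-
-```mysql
--- t6 is created from thermometer on the first insert, then the row is stored
-INSERT INTO t6 USING thermometer TAGS ('beijing', 30) VALUES ('2019-01-01 00:00:00.000', 20.5);
-```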
-
-## Management of STables
-
-After a STable is created, you can describe, drop, or change it. This section lists all the supported operations.
-
-### Show STables in current DB
-
-```mysql
-show stables;
-```
-
-It lists all STables in the current DB, including the name, creation time, number of fields, number of tags, and number of tables created via this STable.
-
-### Describe a STable
-
-```mysql
-DESCRIBE stb_name
-```
-
-It lists the STable's schema and tags.
-
-### Drop a STable
-
-```mysql
-DROP TABLE stb_name
-```
-
-To drop a STable, all the tables created via this STable must be dropped first; otherwise the operation will fail.
-
-### List the Associated Tables of a STable
-
-```mysql
-SELECT TBNAME [, tag_name, ...] FROM stb_name WHERE tag_name [=|<=|>=|<>] tag_value ([AND|OR] ...)
-```
-
-It lists all the tables created from this specific STable which satisfy the tag filter conditions. TBNAME is a newly introduced keyword; it is the table name associated with the STable.
-
-```mysql
-SELECT COUNT(TBNAME) FROM stb_name WHERE tag_name [=|<=|>=|<>] tag_value ([AND|OR] ...)
-```
-
-The above SQL statement lists the number of tables in a STable which satisfy the filter condition.
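-
-As a minimal sketch against the thermometer STable above, the first statement lists the tables tagged with location 'beijing', and the second counts them:
-
-```mysql
-SELECT TBNAME, location FROM thermometer WHERE location='beijing';
-SELECT COUNT(TBNAME) FROM thermometer WHERE location='beijing';
-```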
-
-## Management of Tags
-
-You can add, delete and change the tags for a STable, and you can change the tag value of a table. The SQL commands are listed below.
-
-### Add a Tag
-
-```mysql
-ALTER TABLE stb_name ADD TAG tag_name tag_type
-```
-
-It adds a new tag to the STable, with the given data type. The maximum number of tags is 6.
-
-### Drop a Tag
-
-```mysql
-ALTER TABLE stb_name DROP TAG tag_name
-```
-
-It drops a tag from a STable. The first tag cannot be deleted, and there must be at least one tag left.
-
-### Change a Tag's Name
-
-```mysql
-ALTER TABLE stb_name CHANGE TAG old_tag_name new_tag_name
-```
-
-It changes the name of a tag from old to new.
-
-### Change the Tag's Value
-
-```mysql
-ALTER TABLE tb_name SET TAG tag_name=new_tag_value
-```
-
-It changes a table's tag value to a new one.
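-
-A short hedged example, again assuming the thermometer STable and table t1 from above (the tag name owner is illustrative):
-
-```mysql
-ALTER TABLE thermometer ADD TAG owner binary(20);
-ALTER TABLE thermometer CHANGE TAG owner operator;
-ALTER TABLE t1 SET TAG location='tianjin';
-ALTER TABLE thermometer DROP TAG operator;
-```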
diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL.md b/documentation20/webdocs/markdowndocs/TAOS SQL.md
deleted file mode 100644
index ae2d9d34a203de41d0deed523d16095c8830b452..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/TAOS SQL.md
+++ /dev/null
@@ -1,636 +0,0 @@
-# TAOS SQL
-
-TDengine provides a SQL-like query language to insert or query data. You can execute SQL statements through the TDengine Shell, or through the C/C++, Java (JDBC), Python, RESTful, Go, and Node.js APIs, to interact with the `taosd` service.
-
-Before reading through, please have a look at the conventions used for syntax descriptions here in this documentation.
-
-* Square brackets ("[]") indicate optional arguments or clauses
-* Curly braces ("{}") indicate that exactly one member of the set of choices inside the braces must be chosen
-* A single vertical line ("|") works as a separator between multiple optional arguments or clauses
-* Dots ("…") mean the preceding element can be repeated
-
-## Data Types
-
-### Timestamp
-
-The timestamp is the most important data type in TDengine. The first column of each table must be of **`TIMESTAMP`** type, but other columns can also be of **`TIMESTAMP`** type. The following rules apply to timestamps:
-
-* String Format: `'YYYY-MM-DD HH:mm:ss.MS'`, which represents the year, month, day, hour, minute, second and millisecond. For example, `'2017-08-12 18:52:58.128'` is a valid timestamp string. Note: a timestamp string must be quoted with either single or double quotes.
-
-* Epoch Time: a timestamp value can also be a long integer representing milliseconds since the epoch. For example, the value in the above example can be represented as the epoch `1502535178128` in milliseconds. Please note that epoch time doesn't need any quotes.
-
-* Internal Function **`NOW`** : this is the current time of the server
-* If timestamp is 0 when inserting a record, timestamp will be set to the current time of the server
-* Arithmetic operations can be applied to timestamp. For example: `now-2h` represents a timestamp which is 2 hours ago from the current server time. Units include `a` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `n` (months), `y` (years). **`NOW`** can be used in either insertions or queries.
-
-The default time precision is millisecond; you can change it to microsecond by setting the parameter enableMicrosecond in the [system configuration](../administrator/#Configuration-on-Server). For epoch time, the long integer shall then be microseconds since the epoch. For the above string format, MS shall be six digits.
-
-### Data Types
-
-The full list of data types is listed below. For string types of data, we will use ***M*** to indicate the maximum length of that type.
-
-| | Data Type | Bytes | Note |
-| ---- | :---------: | :-----: | ------------------------------------------------------------ |
-| 1 | TINYINT | 1 | A nullable integer type with a range of [-127, 127] |
-| 2 | SMALLINT | 2 | A nullable integer type with a range of [-32767, 32767] |
-| 3 | INT | 4 | A nullable integer type with a range of [-2^31+1, 2^31-1] |
-| 4 | BIGINT | 8 | A nullable integer type with a range of [-2^63+1, 2^63-1] |
-| 5 | FLOAT | 4 | A standard nullable float type with 6-7 significant digits and a range of [-3.4E38, 3.4E38] |
-| 6 | DOUBLE | 8 | A standard nullable double float type with 15-16 significant digits and a range of [-1.7E308, 1.7E308] |
-| 7 | BOOL | 1 | A nullable boolean type, [**`true`**, **`false`**] |
-| 8 | TIMESTAMP | 8 | A nullable timestamp type with the same usage as the primary column timestamp |
-| 9 | BINARY(*M*) | *M* | A nullable string type of length *M*; an error is thrown if the input exceeds the defined length. The maximum length of *M* is 16374, but since the maximum row size is 16K bytes, the actual upper limit will generally be less than 16374. This type only supports ASCII-encoded characters. |
-| 10 | NCHAR(*M*) | 4 * *M* | A nullable string type of length *M*; an error is thrown if the input exceeds the defined length. The **`NCHAR`** type supports Unicode-encoded characters. |
-
-All the keywords in a SQL statement are case-insensitive, but string values are case-sensitive and must be quoted with a pair of `'` or `"`. To include a `'` or a `"` inside a string, use the escape character `\`.
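-
-A small illustrative sketch (the table name is hypothetical), showing an embedded quote escaped with `\`:
-
-```mysql
-INSERT INTO tb1 VALUES (NOW, 'it\'s a string value');
-```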
-
-## Database Management
-
-- **Create a Database**
- ```mysql
- CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]
- ```
- Option: `KEEP` is used for the data retention policy; data records are removed once they are older than keep days. There are more DB storage-related parameters; please check the [system configuration](../administrator/#Configuration-on-Server).
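-
- For example, a minimal sketch creating a database that retains data for one year:
-
- ```mysql
- CREATE DATABASE IF NOT EXISTS demo KEEP 365
- ```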
-
-
-- **Use a Database**
-
- ```mysql
- USE db_name
- ```
- Use or switch the current database.
-
-
-- **Drop a Database**
- ```mysql
- DROP DATABASE [IF EXISTS] db_name
- ```
- Remove a database; all the tables inside the DB will be removed too, so be careful.
-
-
-- **List all Databases**
-
- ```mysql
- SHOW DATABASES
- ```
-
-
-## Table Management
-
-- **Create a Table**
-
- ```mysql
- CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])
- ```
- Note:
-
- 1) The first column must be a `timestamp`, and the system will set it as the primary key.
- 2) The maximum number of columns is 1024, the minimum number of columns is 2.
- 3) The maximum length: database name is 33, table name is 193, column name is 65.
- 4) The record size is limited to 16k bytes.
- 5) The total tag length is limited to 16k bytes; the maximum number of tags is 128.
- 6) For `binary` or `nchar` data types, the length must be specified. For example, binary(20) means a binary data type with 20 bytes.
-
-
-- **Drop a Table**
-
- ```mysql
- DROP TABLE [IF EXISTS] tb_name
- ```
-
-
-- **List all Tables**
- ```mysql
- SHOW TABLES [LIKE tb_name_wildcard]
- ```
- It shows all tables in the current DB.
-
- Note: Wildcard characters can be used in the table name to filter tables.
- Wildcard characters:
- 1) '%' means 0 to any number of characters.
- 2) '_' means exactly one character.
-
-
-- **Print Table Schema**
-
- ```mysql
- DESCRIBE tb_name
- ```
-
-
-- **Add a Column**
-
- ```mysql
- ALTER TABLE tb_name ADD COLUMN field_name data_type
- ```
-
-
-- **Drop a Column**
-
- ```mysql
- ALTER TABLE tb_name DROP COLUMN field_name
- ```
- If a table is created via a [Super Table](), its schema can only be changed via the STable. For tables not created from a STable, you can change their schema directly.
-
-**Tips**: You can apply an operation to a table not in the current DB by prefixing the table name with the DB name and the character '.'. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` even though `demo` is not the currently selected DB.
-
-## STable Management
-
-- **Create a STable**
-
- ```mysql
- CREATE TABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3])
- ```
- Creating a STable is the same as creating a table, except that the tag names and types must be specified.
- Note:
-
- 1) The total tag length should not exceed 512 bytes
- 2) The type of a tag cannot be timestamp
- 3) A tag name should not be the same as any column name or other tag name
- 4) A tag name should not be a TAOS SQL keyword.
-
-
-- **Drop a STable**
-
- ```mysql
- DROP TABLE [IF EXISTS] stb_name
- ```
- Dropping a STable will also delete all the tables created using it.
-
-
-- **List all STables**
-
- ```mysql
- SHOW STABLES [LIKE stb_name_wildcard]
- ```
- It shows all STables in the current DB.
-
- Note: Wildcard characters can be used in the table name to filter tables.
- Wildcard characters:
- 1) '%' means 0 to any number of characters.
- 2) '_' means exactly one character.
-
-
-- **Print STable Schema**
-
- ```mysql
- DESCRIBE stb_name
- ```
-
-- **Add a Column**
-
- ```mysql
- ALTER TABLE stb_name ADD COLUMN field_name data_type
- ```
-
-- **Drop a Column**
-
- ```mysql
- ALTER TABLE stb_name DROP COLUMN field_name
- ```
- If a table is created via a [Super Table](), its schema can only be changed via the STable. For tables not created from a STable, you can change their schema directly.
-
-**Tips**: You can apply an operation to a table not in the current DB by prefixing the table name with the DB name and the character '.'. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` even though `demo` is not the currently selected DB.
-
-## STable Tag Management
-
-- **Add a tag**
-
- ```mysql
- ALTER TABLE stb_name ADD TAG new_tag_name tag_type
- ```
- Add a tag and specify its type for a STable. The total number of tags should be no more than 128.
-
-- **Drop a tag**
-
- ```mysql
- ALTER TABLE stb_name DROP TAG tag_name
- ```
- Drop a tag from a STable; the tag will also be removed from all the tables created using the STable.
-
-
-- **Modify tag name**
-
- ```mysql
- ALTER TABLE stb_name CHANGE TAG old_tag_name new_tag_name
- ```
- Modify a tag name for a STable; the tag name will also be modified for all the tables created using the STable.
-
-
-- **Change tag value**
-
- ```mysql
- ALTER TABLE tb_name SET TAG tag_name=new_tag_value
- ```
-**Note**: 'Add a tag', 'Drop a tag' and 'Modify tag name' apply to a STable, while 'Change tag value' applies to a table.
-
-
-## Inserting Records
-
-- **Insert a Record**
- ```mysql
- INSERT INTO tb_name VALUES (field_value, ...);
- ```
- Insert a data record into table tb_name
-
-
-- **Insert a Record with Selected Columns**
-
- ```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES(field1_value, ...)
- ```
- Insert a data record into table tb_name, with data only in the selected columns; the system sets the unselected columns to NULL. The first column (timestamp) cannot be NULL and must be provided.
-
-
-- **Insert a Batch of Records**
-
- ```mysql
- INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
- ```
- Insert multiple data records into the table
-
-
-- **Insert a Batch of Records with Selected Columns**
-
- ```mysql
- INSERT INTO tb_name (field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
- ```
-
-
-- **Insert Records into Multiple Tables**
-
- ```mysql
- INSERT INTO tb1_name VALUES (field1_value1, ...)(field1_value2, ...)...
- tb2_name VALUES (field1_value1, ...)(field1_value2, ...)...;
- ```
- Insert data records into tables tb1_name and tb2_name
-
-
-- **Insert Records into Multiple Tables with Selected Columns**
-
- ```mysql
- INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...)
- tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
- ```
-
-Note: 1. If the timestamp is 0, the timestamp will be set to the system time on the server.
-      2. The timestamp of the oldest record allowed to be inserted is the current server time minus the configured keep value (the number of days the data is retained); the timestamp of the newest record allowed to be inserted is the current server time plus the configured days value (the time span, in days, covered by one data file). Both keep and days can be specified when creating the database; the default values are 3650 days and 10 days, respectively.
-
-**IMPORT**: If you do want to insert a historical data record into a table, use the IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT.
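-
-For instance, a hedged sketch, assuming a hypothetical table tb(ts timestamp, speed int), that back-fills one historical record:
-
-```mysql
-IMPORT INTO tb VALUES ('2019-01-01 00:00:00.000', 10);
-```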
-
-## Data Query
-
-### Query Syntax:
-
-```mysql
-SELECT select_expr [, select_expr ...]
- FROM {tb_name_list}
- [WHERE where_condition]
- [INTERVAL [interval_offset,] interval_val]
- [FILL fill_val]
- [SLIDING sliding_val]
- [GROUP BY col_list]
- [ORDER BY col_list { DESC | ASC }]
- [HAVING expr_list]
- [SLIMIT limit_val [, SOFFSET offset_val]]
- [LIMIT limit_val [, OFFSET offset_val]]
- [>> export_file]
-
-SELECT function_list FROM tb_name
- [WHERE where_condition]
- [LIMIT limit [, OFFSET offset]]
- [>> export_file]
-```
-
-- To query a table, use `*` to select all data from the table, or specify a list of expressions `expr_list` over the columns. The SQL expressions can contain aliases and arithmetic operations between numeric-typed columns.
-- For the `WHERE` conditions, use logical operations to filter the timestamp column and all numeric columns, and wildcards to filter the two string-typed columns.
-- Sort the result set by the first column, the timestamp `_c0` (or use the timestamp column name directly), in either descending or ascending (default) order. "Order by" cannot be applied to other columns.
-- Use `LIMIT` and `OFFSET` to control the number of rows returned and the starting position of the retrieved rows. LIMIT/OFFSET is applied after "order by" operations.
-- Export the retrieved result set into a CSV file using `>>`. The target file's full path should be explicitly specified in the statement.
-
-### Supported Operations of Data Filtering:
-
-| Operation | Note | Applicable Data Types |
-| --------- | ----------------------------- | ------------------------------------- |
-| > | larger than | **`timestamp`** and all numeric types |
-| < | smaller than | **`timestamp`** and all numeric types |
-| >= | larger than or equal to | **`timestamp`** and all numeric types |
-| <= | smaller than or equal to | **`timestamp`** and all numeric types |
-| = | equal to | all types |
-| <> | not equal to | all types |
-| % | match with any char sequences | **`binary`** **`nchar`** |
-| _ | match with a single char | **`binary`** **`nchar`** |
-
-1. For two or more conditions, only AND is supported; OR is not supported yet.
-2. For filtering, only a single range is supported. For example, `value>20 and value<30` is a valid condition, but `value<20 AND value<>5` is an invalid one.
-
-### Some Examples
-
-- For the examples below, table tb1 is created via the following statement
-
- ```mysql
- CREATE TABLE tb1 (ts timestamp, col1 int, col2 float, col3 binary(50))
- ```
-
-- Query all the records in tb1 in the last hour:
- ```mysql
- SELECT * FROM tb1 WHERE ts >= NOW - 1h
- ```
-
-
-- Query all the records in tb1 between 2018-06-01 08:00:00.000 and 2018-06-02 08:00:00.000, select only the records whose col3 value ends with 'nny', and sort them by timestamp in descending order:
- ```mysql
- SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC
- ```
-
-
-- Query the sum of col1 and col2 as alias 'complex_metric', and filter on the timestamp and col2 values. Limit the number of returned rows to 10, and offset the result by 5.
- ```mysql
- SELECT (col1 + col2) AS 'complex_metric' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' and col2 > 1.2 LIMIT 10 OFFSET 5
- ```
-
-
-- Query the number of records in tb1 in the last 10 minutes, whose col2 value is larger than 3.14, and export the result to the file `/home/testoutput.csv`.
- ```mysql
- SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv
- ```
-
-## SQL Functions
-
-### Aggregation Functions
-
-TDengine supports aggregations over numerical values; they are listed below:
-
-- **COUNT**
-
- ```mysql
- SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]
- ```
- Function: return the number of rows.
- Return Data Type: `integer`.
- Applicable Data Types: all.
- Applied to: table/STable.
- Note:
-
- 1) `*` can be used for all columns: a row is counted as long as it has a non-NULL value in some column.
-
- 2) If applied to a specific column, only the rows with non-NULL values in that column are counted.
-
-
-- **AVG**
-
- ```mysql
- SELECT AVG(field_name) FROM tb_name [WHERE clause]
- ```
- Function: return the average value of a specific column.
- Return Data Type: `double`.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
-
-
-- **TWA**
-
- ```mysql
- SELECT TWA(field_name) FROM tb_name WHERE clause
- ```
- Function: return the time-weighted average value of a specific column
- Return Data Type: `double`
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`
- Applied to: table/STable
-
-
-- **SUM**
-
- ```mysql
- SELECT SUM(field_name) FROM tb_name [WHERE clause]
- ```
- Function: return the sum of a specific column.
- Return Data Type: `long integer` or `double`.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
-
-
-- **STDDEV**
-
- ```mysql
- SELECT STDDEV(field_name) FROM tb_name [WHERE clause]
- ```
- Function: returns the standard deviation of a specific column.
- Return Data Type: double.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table.
-
-
-- **LEASTSQUARES**
- ```mysql
- SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
- ```
- Function: performs a linear fit to the primary timestamp and the specified column.
- Return Data Type: a string containing the slope and the intercept of the fitted line.
- Applicable Data Types: all types except timestamp, binary, nchar, bool.
- Applied to: table.
- Note: The timestamp is taken as the independent variable while the specified column value is taken as the dependent variable.
-
-
-### Selector Functions
-
-- **MIN**
-
- ```mysql
- SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]
- ```
- Function: return the minimum value of a specific column.
- Return Data Type: the same data type.
- Applicable Data Types: all types except timestamp, binary, nchar, bool.
- Applied to: table/STable.
-
-
-- **MAX**
- ```mysql
- SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the maximum value of a specific column.
- Return Data Type: the same data type.
- Applicable Data Types: all types except timestamp, binary, nchar, bool.
- Applied to: table/STable.
-
-
-- **FIRST**
-
- ```mysql
- SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the first non-NULL value.
- Return Data Type: the same data type.
- Applicable Data Types: all types.
- Applied to: table/STable.
- Note: To return all columns, use first(*).
-
-
-- **LAST**
- ```mysql
- SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the last non-NULL value.
- Return Data Type: the same data type.
- Applicable Data Types: all types.
- Applied to: table/STable.
- Note: To return all columns, use last(*).
-
-
-- **TOP**
-
- ```mysql
- SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the `k` largest values.
- Return Data Type: the same data type.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note:
- 1) Valid range of `k`: 1≤*k*≤100
- 2) The associated `timestamp` will be returned too.
-
-
-- **BOTTOM**
-
- ```mysql
- SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the `k` smallest values.
- Return Data Type: the same data type.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note:
- 1) valid range of `k`: 1≤*k*≤100;
- 2) The associated `timestamp` will be returned too.
-
-
-- **PERCENTILE**
- ```mysql
- SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: the value of the specified column below which `P` percent of the data points fall.
- Return Data Type: double.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note: The range of `P` is `[0, 100]`. When `P=0`, `PERCENTILE` returns the same value as `MIN`; when `P=100`, it returns the same value as `MAX`.
-
-- **APERCENTILE**
- ```mysql
- SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: the value of the specified column below which `P` percent of the data points fall; it returns an approximate percentile value.
- Return Data Type: double.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note: The range of `P` is `[0, 100]`. When `P=0`, `APERCENTILE` returns the same value as `MIN`; when `P=100`, it returns the same value as `MAX`. `APERCENTILE` has much better performance than `PERCENTILE`.
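-
- For instance, a short sketch computing an approximate median of col1 in the table tb1 used in the examples above:
- ```mysql
- SELECT APERCENTILE(col1, 50) FROM tb1
- ```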
-
-- **LAST_ROW**
- ```mysql
- SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }
- ```
- Function: return the last row.
- Return Data Type: the same data type.
- Applicable Data Types: all types.
- Applied to: table/STable.
- Note: unlike LAST, LAST_ROW returns the latest row even if it contains NULL values.
-
-
-### Transformation Functions
-
-- **DIFF**
- ```mysql
- SELECT DIFF(field_name) FROM tb_name [WHERE clause]
- ```
- Function: return the difference between successive values of the specified column.
- Return Data Type: the same data type.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table.
-
-
-- **SPREAD**
- ```mysql
- SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: return the difference between the maximum and the minimum value.
- Return Data Type: double.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note: spread gives the range of data variation in a table/STable; it is equivalent to `MAX()` - `MIN()`.
-
-
-- **Arithmetic Operations**
- ```mysql
- SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause]
- ```
- Function: arithmetic operations on the selected columns.
- Return Data Type: double.
- Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
- Applied to: table/STable.
- Note: 1) brackets can be used to control operator precedence; 2) If a column has a NULL value, the result is NULL.
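-
- As a small sketch on the table tb1 from the examples above, brackets change the evaluation order:
- ```mysql
- SELECT (col1 + col2) * 1.2 FROM tb1
- ```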
-
-
-## Downsampling
-
-Time-series data are usually sampled by sensors at a very high frequency, but more often we are only interested in the downsampled, aggregated data of each timeline. TDengine provides a convenient way to downsample high-frequency data points and to fill missing data with a variety of interpolation choices.
-
-```mysql
-SELECT function_list FROM tb_name
- [WHERE where_condition]
- INTERVAL (interval)
- [FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
-
-SELECT function_list FROM stb_name
- [WHERE where_condition]
- INTERVAL (interval)
- [FILL ({ VALUE | PREV | NULL | LINEAR})]
- [GROUP BY tags]
-```
-
-The downsampling time window is defined by `interval`, which is at least 10 milliseconds. The query returns a new series of downsampled data that has a series of fixed timestamps with an increment of `interval`.
-
-For the time being, only the functions count, avg, sum, stddev, leastsquares, percentile, min, max, first, and last are supported. Functions that may return multiple rows are not supported.
-
-You can also use `FILL` to interpolate the intervals that don't contain any data. `FILL` currently supports four different interpolation strategies, listed below:
-
-| Interpolation | Usage |
-| --------------------------------- | ------------------------------------------------------------ |
-| `FILL(VALUE, val1 [, val2, ...])` | Interpolate with specified constants |
-| `FILL(PREV)` | Interpolate with the value at the previous timestamp |
-| `FILL(LINEAR)` | Linear interpolation with the non-null values at the previous timestamp and at the next timestamp |
-| `FILL(NULL)` | Interpolate with **`NULL`** value |
-
- A few downsampling examples:
-
-- Find the number of data points, the maximum value of `col1` and minimum value of `col2` in a tb1 for every 10 minutes in the last 5 hours:
- ```mysql
- SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL (10m)
- ```
-
-
-- Fill the above downsampling results using constant-value interpolation:
- ```mysql
- SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(VALUE, 0, 1, -1)
- ```
- Note that the number of constant values in `FILL()` should be equal to or fewer than the number of functions in the `SELECT` clause. Extra fill constants will be ignored.
-
-
-- Fill the above downsampling results using `PREV` interpolation:
- ```mysql
- SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(PREV)
- ```
- This will interpolate missing data points with the value at the previous timestamp.
-
-
-- Fill the above downsampling results using `NULL` interpolation:
- ```mysql
- SELECT COUNT(*), MAX(col1), MIN(col2) FROM tb1 WHERE ts > NOW - 5h INTERVAL(10m) FILL(NULL)
- ```
- Fill the interpolated data points with **`NULL`**.
-
-Notes:
-1. `FILL` can generate a large number of interpolated data points if the interval is small and the queried time range is large, so always remember to specify a time range when using interpolation. For each query with interpolation, the result set cannot exceed 10,000,000 records.
-2. The result set will always be sorted by time in ascending order.
-3. If the query object is a supertable, the functions will be applied to all the tables that satisfy the `WHERE` conditions. If the `GROUP BY` clause is also used, the result set will be sorted by time in ascending order within each group; otherwise, the result set will be sorted by time in ascending order as a whole.
-
diff --git a/documentation20/webdocs/markdowndocs/administrator.md b/documentation20/webdocs/markdowndocs/administrator.md
deleted file mode 100644
index e118be525d117f0dc85f91c9fc3b66bf676ef30a..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/administrator.md
+++ /dev/null
@@ -1,231 +0,0 @@
-# Administrator
-
-## Directory and Files
-
-After TDengine is installed, by default, the following directories will be created:
-
-| Directory/File | Description |
-| ---------------------- | :------------------------------ |
-| /etc/taos/taos.cfg | TDengine configuration file |
-| /usr/local/taos/driver | TDengine dynamic link library |
-| /var/lib/taos | TDengine default data directory |
-| /var/log/taos | TDengine default log directory |
-| /usr/local/taos/bin | TDengine executables |
-
-### Executables
-
-All TDengine executables are located at _/usr/local/taos/bin_ , including:
-
-- `taosd`: TDengine server
-- `taos`: TDengine Shell, the command line interface.
-- `taosdump`: TDengine data export tool
-- `rmtaos`: a script to uninstall TDengine
-
-You can change the data directory and log directory settings through the system configuration file.
-
-## Configuration on Server
-
-`taosd` runs on the server side; you can change the system configuration file taos.cfg to customize its behavior. By default, taos.cfg is located at /etc/taos, but you can specify a different path via the command line parameter -c. For example: `taosd -c /home/user` means the configuration file will be read from the directory /home/user.
-
-This section lists only the most important configuration parameters. Please check taos.cfg to find all the configurable parameters. **Note: to make your new configurations work, you have to restart taosd after you change taos.cfg**.
-
-- mgmtShellPort: TCP and UDP port between client and TDengine mgmt (default: 6030). Note: 5 successive UDP ports (6030-6034) starting from this number will be used.
-- vnodeShellPort: TCP and UDP port between client and TDengine vnode (default: 6035). Note: 5 successive UDP ports (6035-6039) starting from this number will be used.
-- httpPort: TCP port for RESTful service (default: 6020)
-- dataDir: data directory, default is /var/lib/taos
-- maxUsers: maximum number of users allowed
-- maxDbs: maximum number of databases allowed
-- maxTables: maximum number of tables allowed
-- enableMonitor: turn on/off system monitoring, 0: off, 1: on
-- logDir: log directory, default is /var/log/taos
-- numOfLogLines: maximum number of lines in the log file
-- debugFlag: log level, 131: only error and warnings, 135: all
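-
-A minimal sketch of a taos.cfg fragment covering a few of these parameters (the values are illustrative, not recommendations):
-
-```
-dataDir       /var/lib/taos
-logDir        /var/log/taos
-enableMonitor 1
-numOfLogLines 10000000
-debugFlag     131
-```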
-
-In different scenarios, data characteristics are different. For example, the retention policy, data sampling period, record size, the number of devices, and data compression may be different. To gain the best performance, you can change the following configurations related to storage:
-
-- days: number of days to cover for a data file
-- keep: number of days to keep the data
-- rows: number of rows of records in a block in data file.
-- comp: compression algorithm, 0: off, 1: standard; 2: maximum compression
-- ctime: period (seconds) to flush data to disk
-- clog: flag to turn on/off Write Ahead Log, 0: off, 1: on
-- tables: maximum number of tables allowed in a vnode
-- cache: cache block size (bytes)
-- tblocks: maximum number of cache blocks for a table
-- ablocks: average number of cache blocks for a table
-- precision: timestamp precision, us: microsecond, ms: millisecond, default is ms
-
-For an application, there may be multiple data scenarios. The best design is to put all data with the same characteristics into one database. One application may have multiple databases, and every database has its own configuration to maximize the system performance. You can specify the above configurations related to storage when you create a database. For example:
-
-```mysql
-CREATE DATABASE demo DAYS 10 CACHE 16000 ROWS 2000
-```
-
-The above SQL statement creates a database named demo, with each data file covering 10 days, 16000 bytes per cache block, and 2000 rows per file block.
-
-The configuration provided when creating a database overrides the corresponding configuration in taos.cfg.
-
-## Configuration on Client
-
-*taos* is the TDengine shell, a client that connects to taosd. TDengine uses the same configuration file, taos.cfg, for the client, with the default location /etc/taos. You can change it by specifying the command line parameter -c when you run taos. For example, *taos -c /home/user* reads the configuration file taos.cfg from the directory /home/user.
-
-The parameters related to client configuration are listed below:
-
-- masterIP: IP address of TDengine server
-- charset: character set; default is the system setting. For the data type nchar, TDengine uses Unicode to store the data, so the client needs to tell the server its character set.
-- locale: system language setting
-- defaultUser: default login user, default is root
-- defaultPass: default password, default is taosdata
-
-The TCP/UDP ports and the system debug/log configuration are the same as on the server side.
-
-For the server IP, user name and password, you can always specify them on the command line when you run taos; if they are not specified, they will be read from taos.cfg.
-
-## User Management
-
-The system administrator (user root) can add or remove a user, or change a password, from the TDengine shell. The commands are listed below:
-
-Create a user; the password must be enclosed in single quotes.
-
-```mysql
-CREATE USER user_name PASS 'password'
-```
-
-Remove a user
-
-```mysql
-DROP USER user_name
-```
-
-Change the password for a user
-
-```mysql
-ALTER USER user_name PASS 'password'
-```
-
-List all users
-
-```mysql
-SHOW USERS
-```
-
-## Import Data
-
-Inside the TDengine shell, you can import data into TDengine from either a script or a CSV file.
-
-**Import from Script**
-
-```
-source file_path
-```
-
-You can put all the SQL statements in the file, one statement per line. Lines starting with "#" are comments and will be skipped. The system executes the statements line by line automatically until the end of the file.
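-
-For example, a hedged sketch of such a script file (all names are illustrative):
-
-```
-# create and switch to a database
-create database if not exists demo;
-use demo;
-# create a table and insert one record
-create table if not exists tb (ts timestamp, speed int);
-insert into tb values (now, 10);
-```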
-
-**Import from CSV**
-
-```mysql
-insert into tb1 file 'path/data.csv'
-```
-
-A CSV file contains records for one table only, and its data structure shall be identical to the table's defined schema. The header line of the CSV file must be removed.
-
-For example, the following is the schema of a sub-table d1001:
-
-```mysql
-taos> DESCRIBE d1001
- Field | Type | Length | Note |
-=================================================================================
- ts | TIMESTAMP | 8 | |
- current | FLOAT | 4 | |
- voltage | INT | 4 | |
- phase | FLOAT | 4 | |
- location | BINARY | 64 | TAG |
- groupid | INT | 4 | TAG |
-```
-The data in data.csv is formatted like this:
-
-```csv
-'2018-10-04 06:38:05.000',10.30000,219,0.31000
-'2018-10-05 06:38:15.000',12.60000,218,0.33000
-'2018-10-06 06:38:16.800',13.30000,221,0.32000
-'2018-10-07 06:38:05.000',13.30000,219,0.33000
-'2018-10-08 06:38:05.000',14.30000,219,0.34000
-'2018-10-09 06:38:05.000',15.30000,219,0.35000
-'2018-10-10 06:38:05.000',16.30000,219,0.31000
-'2018-10-11 06:38:05.000',17.30000,219,0.32000
-'2018-10-12 06:38:05.000',18.30000,219,0.31000
-```
-Then the data can be imported into the database with this command:
-
-```
-taos> insert into d1001 file '~/data.csv';
-Query OK, 9 row(s) affected (0.004763s)
-```
-## Export Data
-
-You can export data either from the TDengine shell or with the tool taosdump.
-
-**Export from TDengine Shell**
-
-```mysql
-select * from tb_name >> data.csv
-```
-
-The above SQL statement dumps the query result set into the file data.csv.
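-
-For instance, a short sketch exporting the sub-table d1001 shown above (the target path is illustrative):
-
-```mysql
-select * from d1001 >> /tmp/d1001.csv
-```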
-
-**Export Using taosdump**
-
-TDengine provides a data dumping tool, taosdump. You can choose to dump a database, a table, all data, only data in a given time range, or even only the metadata. For example:
-
-- Export one or more tables in a DB: taosdump [OPTION…] dbname tbname …
-- Export one or more DBs: taosdump [OPTION…] --databases dbname…
-- Export all DBs (excluding system DB): taosdump [OPTION…] --all-databases
-
-Run *taosdump --help* to get a full list of the options.
-
-## Management of Connections, Streams, Queries
-
-The system administrator can check and kill ongoing connections, streams, or queries.
-
-```
-SHOW CONNECTIONS
-```
-
-It lists all connections. The first column shows connection-id from the client.
-
-```
-KILL CONNECTION connection-id
-```
-
-It kills the connection, where connection-id is the number in the first column shown by "SHOW CONNECTIONS".
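-
-For example, a minimal sketch that kills the connection whose id, as shown by "SHOW CONNECTIONS", is 1:
-
-```
-KILL CONNECTION 1
-```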
-
-```
-SHOW QUERIES
-```
-
-It shows the ongoing queries. The first column shows connection-id:query-no, where connection-id is the connection from the client and query-no is the id assigned by the system.
-
-```
-KILL QUERY query-id
-```
-
-It kills the query, where query-id is the connection-id:query-no shown by "SHOW QUERIES". You can copy and paste it.
-
-```
-SHOW STREAMS
-```
-
-It shows the continuous queries. The first column shows connection-id:stream-no, where connection-id is the connection from the client and stream-no is the id assigned by the system.
-
-```
-KILL STREAM stream-id
-```
-
-It kills the continuous query, where stream-id is the connection-id:stream-no shown by "SHOW STREAMS". You can copy and paste it.
-
-## System Monitor
-
-TDengine runs a system monitor in the background. Once started, it automatically creates a database named sys. The system monitor periodically collects metrics like CPU, memory, network, disk usage and number of requests, and writes them into the sys database. TDengine also logs all important actions, like login, logout, create database, drop database and so on, and writes them into the sys database.
-
-You can check all the saved monitoring information in the database sys. By default, the system monitor is turned on, but you can turn it off by changing the corresponding parameter in the configuration file.
-
diff --git a/documentation20/webdocs/markdowndocs/advanced features.md b/documentation20/webdocs/markdowndocs/advanced features.md
deleted file mode 100644
index 05f6da5ed9e94684e7d39f086d850805eb966b85..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/advanced features.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Advanced Features
-
-## Continuous Query
-A continuous query is a query executed by TDengine periodically with a sliding window; it is a simplified stream computing model driven by timers, not by events. A continuous query can be applied to a table or a STable, and the result set can be passed to the application directly via a callback function, or written into a new table in TDengine. The query is always executed on a specified time window (the window size is specified by the parameter interval), and this window slides forward as time flows (the sliding period is specified by the parameter sliding).
-
-A continuous query is defined in TAOS SQL; there is nothing special about it. One of its best applications is downsampling. Once a continuous query is defined, at the end of each cycle the system will execute the query and pass the result to the application or write it to a database.
-
-If historical data points are inserted into the stream, the query won't be re-executed, and the result set won't be updated. If the result set is passed to the application, the application needs to keep the status of the continuous query; the server won't maintain it. If the application restarts, it needs to decide the time from which stream computing shall resume.
-
-#### How to use continuous query
-
-
-
- - Pass result set to application
-
- Application shall use API taos_stream (details in connector section) to start the stream computing. Inside the API, the SQL syntax is:
-
- ```sql
- SELECT aggregation FROM [table_name | stable_name]
- INTERVAL(window_size) SLIDING(period)
- ```
-
- where the new keyword INTERVAL specifies the window size and SLIDING specifies the sliding period. If the parameter sliding is not specified, the sliding period will be the same as the window size. The minimum window size is 10ms. The sliding period shall not be larger than the window size; if you set a larger value, the system will adjust it down to the window size automatically.
-
- For example:
-
- ```sql
- SELECT COUNT(*) FROM FOO_TABLE
- INTERVAL(1M) SLIDING(30S)
- ```
-
- The above SQL statement will count the number of records for the past 1-minute window every 30 seconds.
-
- - Save the result into a database
-
- If you want to save the result set of stream computing into a new table, the SQL shall be:
-
- ```sql
- CREATE TABLE table_name AS
- SELECT aggregation from [table_name | stable_name]
- INTERVAL(window_size) SLIDING(period)
- ```
-
- Also, you can set the time range for the continuous query. If no range is specified, the continuous query will run forever. For example, the following continuous query starts now and will stop in one hour.
-
- ```sql
- CREATE TABLE QUERY_RES AS
- SELECT COUNT(*) FROM FOO_TABLE
- WHERE TS > NOW AND TS <= NOW + 1H
- INTERVAL(1M) SLIDING(30S)
- ```
-
-### Manage the Continuous Query
-
-Inside the TDengine shell, you can use the command "show streams" to list the ongoing continuous queries, and the command "kill stream" to kill a specific one.
-
-If you drop a table generated by the continuous query, the query will be removed too.
-
-## Publisher/Subscriber
-
-Time-series data is a sequence of data points over time. Inside a table, the data points are stored in order of timestamp. Also, there is a data retention policy: the data points will be removed once their lifetime has passed. From another point of view, a table in TDengine is just a standard message queue.
-
-To reduce development complexity and improve data consistency, TDengine provides pub/sub functionality. To publish a message, you simply insert a record into a table. Compared with the popular messaging tool Kafka, you subscribe to a table or a SQL query statement instead of a topic. Once new data points arrive, TDengine will notify the application. The process is just like Kafka's.
-
-The detailed API will be introduced in the [connectors](https://www.taosdata.com/en/documentation/advanced-features/) section.
-
-## Caching
-TDengine allocates a fixed-size buffer in memory, and newly arrived data is written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data is always the newly arrived data, which is more important for timely analysis. Based on this observation, TDengine manages its cache blocks in a First-In-First-Out (FIFO) manner. If there is not enough space in the buffer, the oldest data is saved to hard disk first and then overwritten by newly arrived data. TDengine also guarantees that every device can keep at least one block of data in the buffer.
-
-With this design, the application can retrieve the latest data of each device super-fast, since it is all available in memory. You can use the last or last_row function to return the last data record. If a super table is used, it can return the last data records of all or a subset of devices. For example, to retrieve the latest temperature from thermometers located in Beijing, execute the following SQL:
-
-```mysql
-select last(*) from thermometers where location='beijing'
-```
-
-With this design, a caching tool like Redis is not needed in the system, which reduces system complexity.
-
-TDengine creates one or more virtual nodes (vnodes) in each data node. Each vnode contains data for multiple tables and has its own buffer. The buffer of one vnode is fully separated from the buffer of another vnode and is not shared. But the tables in a vnode share the same buffer.
-
-The system configuration parameter cacheBlockSize configures the cache block size in bytes, and another parameter, cacheNumOfBlocks, configures the number of cache blocks. The total memory for the buffer of a vnode is $cacheBlockSize \times cacheNumOfBlocks$. Another system parameter, numOfBlocksPerMeter, configures the maximum number of cache blocks a table can use. You can specify these parameters when you create a database.
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/architecture.md b/documentation20/webdocs/markdowndocs/architecture.md
deleted file mode 100644
index 3a91f1e8dc24314f66acf906e69d3dcd0df8e370..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/architecture.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Data Model and Architecture
-## Data Model
-
-### A Typical IoT Scenario
-
-In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data looks like the table below:
-
-| Device ID | Time Stamp | Value 1 | Value 2 | Value 3 | Tag 1 | Tag 2 |
-| :-------: | :-----------: | :-----: | :-----: | :-----: | :---: | :---: |
-| D1001 | 1538548685000 | 10.3 | 219 | 0.31 | Red | Tesla |
-| D1002 | 1538548684000 | 10.2 | 220 | 0.23 | Blue | BMW |
-| D1003 | 1538548686500 | 11.5 | 221 | 0.35 | Black | Honda |
-| D1004 | 1538548685500 | 13.4 | 223 | 0.29 | Red | Volvo |
-| D1001 | 1538548695000 | 12.6 | 218 | 0.33 | Red | Tesla |
-| D1004 | 1538548696600 | 11.8 | 221 | 0.28 | Black | Honda |
-
-Each data record has a device ID, a timestamp, the collected metrics, and the static tags associated with the device. Each device generates a data record on a pre-defined timer or triggered by an event. It is a sequence of data points, like a stream.
-
-### Data Characteristics
-
-Being a series of data points over time, data points generated by devices, sensors, servers, or applications have strong common characteristics.
-
-1. metrics are always structured data;
-2. there are rarely delete/update operations on collected data;
-3. there is only one single data source for one device or sensor;
-4. the ratio of reads to writes is much lower than in typical Internet applications;
-5. the user pays attention to the trend of data, not the specific value at a specific time;
-6. there is always a data retention policy;
-7. the data query is always executed in a given time range and a subset of devices;
-8. real-time aggregation or analytics is mandatory;
-9. traffic is predictable based on the number of devices and sampling frequency;
-10. data volume is huge, a system may generate 10 billion data points in a day.
-
-By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data. The system efficiency is improved significantly.
-
-### Relational Database Model
-
-Since time-series data is more likely to be structured data, TDengine adopts the traditional relational database model to process it. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, so there is no learning curve.
-
-### One Table for One Device
-
-Due to different network latencies, the data points from different devices may arrive at the server out of order. But for the same device, data points will arrive at the server in order if the system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003 and D1004 to store the data collected.
-
-This strong requirement guarantees that the data points from one device are saved block by block in a continuous memory/hard disk space. If queries are applied to only one device in a time range, this design reduces the read latency significantly, since a whole block is owned by one single device. Write latency is also significantly reduced: since the data points generated by the same device arrive in order, a new data point is simply appended to a block. The cache block size and the number of rows in a file block can be configured to fit the scenario.
-
-### Best Practices
-
-**Table**: TDengine suggests using the device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, value2, value3 in the diagram). Each metric has a column in the table, and the metric name can be used as the column name. The data type of a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes a device has multiple metric groups, each with a different sampling period; in that case, create a separate table for each group of each device. The first column in the table must be the timestamp. TDengine uses the timestamp as the index and won't build an index on any stored metric.
-
-**Tags:** to support aggregation over multiple tables efficiently, TDengine introduces the [STable (Super Table)](../super-table) concept. A STable is used to represent one type of device. Its schema defines the collected metrics (like value1, value2, value3 in the diagram), and its tags define the static attributes of each table or device (like tag1, tag2 in the diagram). A table is created via a STable with specific tag values. All or a subset of the tables in a STable can be aggregated by filtering on tag values.
-
-**Database:** different types of devices may generate data points in different patterns and shall be processed differently. For example, the sampling frequency, data retention policy, replication number, cache size, record size, and compression algorithm may differ. To make the system more efficient, TDengine suggests creating a separate database with its own configuration for each scenario.
-
-**Schemaless vs Schema:** compared with NoSQL databases, since a table with a schema definition must be created before data points can be inserted, flexibility is reduced, especially when the schema has to be changed. But in most IoT scenarios, the schema is well defined and rarely changed, so the loss of flexibility won't be a big pain for developers or administrators. TDengine allows the application to change the schema in seconds, even when there is a huge amount of historical data.
-
-TDengine does not impose a limitation on the number of tables, [STables](../super-table), or databases. You can create any number of STable or databases to fit the scenarios.
-
-## Architecture
-
-There are two main modules in the TDengine server as shown in Picture 1: the **Management Module (MGMT)** and the **Data Module (DNODE)**. The whole TDengine architecture also includes a **TDengine Client Module**.
-
-
- Picture 1 TDengine Architecture
-### MGMT Module
-The MGMT module handles the storage and querying of metadata, which includes information about users, databases, and tables. Applications connect to the MGMT module first when connecting to the TDengine server. When databases or tables are created or dropped, the request is sent to the MGMT module first to create or delete the metadata. The MGMT module then sends requests to the data module to allocate or free the required resources. When writing or querying, applications still need to visit the MGMT module to get the metadata, and then access the DNODE module accordingly.
-
-### DNODE Module
-The DNODE module is responsible for storing and querying data. For the sake of future scaling and highly efficient resource usage, TDengine applies virtualization to the resources it uses. TDengine introduces the concept of a virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.
-
-When a DB is created, the system will allocate a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or more vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables, and each vnode has its own cache and its own directory to store data. The resources of different vnodes are exclusive to each other, whether cache or file directory, while the resources within one vnode are shared by all the tables in it. Through virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.
-
-
- Picture 2 TDengine Virtualization
-
-### Client Module
-The TDengine client module accepts requests (mainly in SQL form) from applications, converts them to internal representations, and sends them to the server side. TDengine supports multiple interfaces, which are all built on top of the TDengine client module.
-
-For the communication between the client and the MGMT module, TCP/UDP is used; the port is set by the parameter mgmtShellPort in the system configuration file taos.cfg, default 6030. For the communication between the client and the DNODE module, TCP/UDP is used; the port is set by the parameter vnodeShellPort in the system configuration file, default 6035.
-
-## Writing Process
-Picture 3 shows the full writing process of TDengine. TDengine uses the Write Ahead Log (WAL) strategy to ensure data security and integrity. Data received from the client is written to the commit log first. When TDengine recovers from a crash caused by power loss or other situations, the commit log is used to recover data. After writing to the commit log, the data is written to the corresponding vnode cache, and then an acknowledgment is sent to the application. There are two mechanisms that can flush data in the cache to disk for persistent storage:
-
-1. **Flush driven by timer**: There is a backend timer which periodically flushes data in the cache to disk. The period is configurable via the parameter commitTime in the system configuration file taos.cfg.
-2. **Flush driven by data**: Data in the cache is also flushed to disk when the remaining buffer size falls below a threshold. A data-driven flush resets the timer of the timer-driven flush.
-
-
- Picture 3 TDengine Writing Process
-
-A new commit log file is opened when the committing process begins. When the committing process finishes, the old commit log file is removed.
-
-## Data Storage
-
-TDengine data are saved in _/var/lib/taos_ directory by default. It can be changed to other directories by setting the parameter dataDir in system configuration file taos.cfg.
-
-TDengine's metadata includes the database, table, user, super table and tag information. To reduce the latency, metadata are all buffered in the cache.
-
-Data records saved in tables are sharded according to the time range. The data of tables in the same vnode in a certain time range is saved in the same file group. This sharding strategy can effectively improve data searching speed. By default, one group of files contains data for 10 days, which can be configured via *daysPerFile* in the configuration file or via the *DAYS* keyword in the *CREATE DATABASE* clause.
-
-Data records are removed automatically once their lifetime is passed. The lifetime is configurable via parameter daysToKeep in the system configuration file. The default value is 3650 days.
-
-Data in files is stored blockwise. A data block contains only one table's data. Records in the same data block are sorted according to the primary timestamp. To improve the compression ratio, records are stored column by column, and a different compression algorithm is applied based on each column's data type.
\ No newline at end of file
diff --git a/documentation20/webdocs/markdowndocs/faq.md b/documentation20/webdocs/markdowndocs/faq.md
deleted file mode 100644
index ce7d2ebf5e390d616d47835af5f958af1223ea59..0000000000000000000000000000000000000000
--- a/documentation20/webdocs/markdowndocs/faq.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# FAQ
-
-#### 1. How do I upgrade TDengine from a 1.X version to 2.X or above?
-
-Version 2.X is a complete refactoring of the previous version, and its configuration files and data files are incompatible. Be sure to do the following before upgrading:
-
-1. Delete the configuration file, and execute sudo rm -rf /etc/taos/taos
-2. Delete the log file, and execute sudo rm -rf /var/log/taos
-3. ENSURE THAT YOUR DATA IS NO LONGER NEEDED! Delete the data files, and execute sudo rm -rf /var/lib/taos
-4. Enjoy the latest stable version of TDengine
-5. If the data needs to be migrated or the data file is corrupted, please contact the official technical support team for assistance
-
-#### 2. What can I do when I encounter the error "Unable to establish connection"?
-
-The client may encounter connection errors. Please follow the steps below for troubleshooting:
-
-1. Make sure that the client and server version numbers are exactly the same, and that the open-source community and enterprise versions are not mixed.
-2. On the server side, execute `systemctl status taosd` to check the status of *taosd* service. If *taosd* is not running, start it and retry connecting.
-3. Make sure you have used the correct server IP address to connect to.
-4. Ping the server. If no response is received, check your network connection.
-5. Check the firewall setting, make sure the TCP/UDP ports from 6030-6039 are enabled.
-6. For JDBC, ODBC, Python, Go connections on Linux, make sure the native library *libtaos.so* is located at /usr/local/lib/taos, and /usr/local/lib/taos is in *LD_LIBRARY_PATH*.
-7. For JDBC, ODBC, Python, Go connections on Windows, make sure *driver/c/taos.dll* is in the system search path (or you can copy taos.dll into *C:\Windows\System32*)
-8. If the above steps do not help, try the network diagnostic tool *nc* to check whether the TCP/UDP ports work
- check UDP port: `nc -vuz {hostIP} {port}`
- check TCP port on server: `nc -l {port}`
- check TCP port on client: `nc {hostIP} {port}`
-
-#### 3. Why do I get an "Invalid SQL" error when a query is syntactically correct?
-
-If you are sure your query has correct syntax, please check the length of the SQL string. Before version 2.0, it must be less than 64 KB.
-
-#### 4. Does TDengine support validation queries?
-
-For the time being, TDengine does not have a specific set of validation queries. However, TDengine comes with a system monitoring database named 'sys', which can usually be used as a validation query object.
-
-#### 5. Can I delete or update a record that has been written into TDengine?
-
-The answer is NO. TDengine's design assumes that records are generated by the connected devices and, once written, will not be changed. However, TDengine provides a retention policy: data records are removed automatically once their lifetime has passed.
-
-#### 6. What is the most efficient way to write data to TDengine?
-
-TDengine supports several writing methods. The most efficient way to write data to TDengine is batch inserting. For details on batch insertion syntax, please refer to [Taos SQL](../documentation/taos-sql)
-
-
-
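
The removed FAQ's last answer, batch inserting, is exactly the write path the importer below builds by hand. Here is a hedged sketch of one multi-table batch insert through driver-go; it assumes the sub tables demo.t_0 and demo.t_1 already exist with a (ts timestamp, v int) schema, and all names and values are illustrative:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/taosdata/driver-go/taosSql"
)

// batchInsert writes two rows into each of two sub tables in a single
// statement, the write path the FAQ recommends.
func batchInsert(db *sql.DB) error {
	stmt := "insert into demo.t_0 values (now-2s, 20) (now-1s, 21) " +
		"demo.t_1 values (now-2s, 18) (now-1s, 19)"
	_, err := db.Exec(stmt)
	return err
}

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:6030)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := batchInsert(db); err != nil {
		log.Fatal(err)
	}
}
```
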
diff --git a/importSampleData/README.md b/importSampleData/README.md
index ee3a6e073c18b618af49a9c0b6d2d6d07718f00f..56c5be0da422aadc5e05fe000ab83c312d29b6c8 100644
--- a/importSampleData/README.md
+++ b/importSampleData/README.md
@@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go
  Whether to save statistics to the statistic table in tdengine: 1 yes, 0 no. Default 0.
-* -savetb int
+* -savetb string
  Table name used to save statistics when save is 1. Default statistic.
diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go
index 61de6e740c1f0cf71c8c94e384dcd68fc58fbc60..5fee49734d058082b662ad0e173b22cf78acac43 100644
--- a/importSampleData/app/main.go
+++ b/importSampleData/app/main.go
@@ -7,7 +7,6 @@ import (
"encoding/json"
"flag"
"fmt"
- "hash/crc32"
"io"
"log"
"os"
@@ -17,47 +16,55 @@ import (
"sync"
"time"
- dataimport "github.com/taosdata/TDengine/importSampleData/import"
+ dataImport "github.com/taosdata/TDengine/importSampleData/import"
_ "github.com/taosdata/driver-go/taosSql"
)
const (
- TIMESTAMP = "timestamp"
- DATETIME = "datetime"
- MILLISECOND = "millisecond"
- DEFAULT_STARTTIME int64 = -1
- DEFAULT_INTERVAL int64 = 1 * 1000
- DEFAULT_DELAY int64 = -1
- DEFAULT_STATISTIC_TABLE = "statistic"
-
- JSON_FORMAT = "json"
- CSV_FORMAT = "csv"
- SUPERTABLE_PREFIX = "s_"
- SUBTABLE_PREFIX = "t_"
-
- DRIVER_NAME = "taosSql"
- STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
- INSERT_PREFIX = "insert into "
+ // the primary key type must be timestamp
+ TIMESTAMP = "timestamp"
+
+ // whether the primary key time field in the sample data is in millisecond or datetime format
+ DATETIME = "datetime"
+ MILLISECOND = "millisecond"
+
+ DefaultStartTime int64 = -1
+ DefaultInterval int64 = 1 * 1000 // interval between imported records in milliseconds; only takes effect when auto=1, otherwise the interval is derived from the sample data. Default 1000.
+ DefaultDelay int64 = -1
+
+ // table name used to save statistics when save is 1. Default statistic.
+ DefaultStatisticTable = "statistic"
+
+ // sample data file format, either json or csv
+ JsonFormat = "json"
+ CsvFormat = "csv"
+
+ SuperTablePrefix = "s_" // super table prefix
+ SubTablePrefix = "t_" // sub table prefix
+
+ DriverName = "taosSql"
+ StartTimeLayout = "2006-01-02 15:04:05.000"
+ InsertPrefix = "insert into "
)
var (
- cfg string
- cases string
- hnum int
- vnum int
- thread int
- batch int
- auto int
- starttimestr string
- interval int64
- host string
- port int
- user string
- password string
- dropdb int
- db string
- dbparam string
+ cfg string // path of the import configuration file, describing the sample data files and the matching TDengine settings. Default config/cfg.toml
+ cases string // names of the use cases to import, as listed under [usecase] in the file given by -cfg; several cases can be imported at once, separated by commas, e.g. sensor_info,camera_detection. Default sensor_info
+ hnum int // horizontal scaling factor for the sample data; if the sample data contains one sub table t_0 and hnum is 2, two sub tables (t_0 and t_0_1) are created from the original table name. Default 100
+ vnum int // number of times the sample data is replicated along the time axis; 0 means that after the historical data reaches the current time, the import continues at the given interval. Default 1000, i.e. the sample data is copied 1000 times on the time axis
+ thread int // number of threads used for importing. Default 10
+ batch int // batch size for importing, i.e. how many records one write operation carries. Default 100
+ auto int // whether to auto-generate the primary key timestamps of the sample data: 1 yes, 0 no. Default 0
+ startTimeStr string // start time of the imported records, in the format "yyyy-MM-dd HH:mm:ss.SSS"; if unset, the earliest time in the sample data is used; if set, the primary key times in the sample data are ignored and the import starts from the given start. Must be set when auto is 1. Default empty
+ interval int64 // interval between imported records in milliseconds; only takes effect when auto=1, otherwise the interval is derived from the sample data. Default 1000
+ host string // IP of the target TDengine server. Default 127.0.0.1
+ port int // port of the target TDengine server. Default 6030
+ user string // TDengine user name. Default root
+ password string // TDengine user password. Default taosdata
+ dropdb int // whether to drop the database before importing: 1 yes, 0 no. Default 0
+ db string // name of the target TDengine database. Default test_yyyyMMdd
+ dbparam string // optional parameters applied when the database does not exist and is created automatically, e.g. days 10 cache 16000 ablocks 4. Default empty
dataSourceName string
startTime int64
@@ -72,10 +79,10 @@ var (
lastStaticTime time.Time
lastTotalRows int64
timeTicker *time.Ticker
- delay int64 // default 10 milliseconds
- tick int64
- save int
- saveTable string
+ delay int64 // interval for continuous import when vnum is 0; defaults to half of the smallest record interval across all cases, in ms.
+ tick int64 // interval for printing statistics. Default 2000 ms.
+ save int // whether to save statistics to the statistic table in tdengine: 1 yes, 0 no. Default 0.
+ saveTable string // table name used to save statistics when save is 1. Default statistic.
)
type superTableConfig struct {
@@ -83,7 +90,7 @@ type superTableConfig struct {
endTime int64
cycleTime int64
avgInterval int64
- config dataimport.CaseConfig
+ config dataImport.CaseConfig
}
type scaleTableInfo struct {
@@ -92,14 +99,14 @@ type scaleTableInfo struct {
insertRows int64
}
-type tableRows struct {
- tableName string // tableName
- value string // values(...)
-}
+//type tableRows struct {
+// tableName string // tableName
+// value string // values(...)
+//}
type dataRows struct {
rows []map[string]interface{}
- config dataimport.CaseConfig
+ config dataImport.CaseConfig
}
func (rows dataRows) Len() int {
@@ -107,9 +114,9 @@ func (rows dataRows) Len() int {
}
func (rows dataRows) Less(i, j int) bool {
- itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
- jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
- return itime < jtime
+ iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
+ jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
+ return iTime < jTime
}
func (rows dataRows) Swap(i, j int) {
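
For context on the renames above: dataRows implements sort.Interface (Len, Less, Swap), which is what lets the importer call sort.Sort to order sample rows by their primary timestamp. A self-contained sketch of the same pattern, with the row type reduced to a bare millisecond timestamp:

```go
package main

import (
	"fmt"
	"sort"
)

// row stands in for one sample record keyed by an epoch-millisecond timestamp.
type row struct{ ts int64 }

// byTs implements sort.Interface the same way dataRows does.
type byTs []row

func (r byTs) Len() int           { return len(r) }
func (r byTs) Less(i, j int) bool { return r[i].ts < r[j].ts }
func (r byTs) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }

func main() {
	rows := byTs{{1575129602000}, {1575129600000}, {1575129601000}}
	sort.Sort(rows) // the same call the importer makes on fileRows
	fmt.Println(rows) // [{1575129600000} {1575129601000} {1575129602000}]
}
```
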
@@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 {
}
func init() {
- parseArg() //parse argument
+ parseArg() // parse argument
if db == "" {
- //db = "go"
+ // default database name: test_yyyyMMdd
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
}
- if auto == 1 && len(starttimestr) == 0 {
+ if auto == 1 && len(startTimeStr) == 0 {
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
}
- if len(starttimestr) != 0 {
- t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local)
+ if len(startTimeStr) != 0 {
+ t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local)
if err != nil {
- log.Fatalf("param startTime %s error, %s\n", starttimestr, err)
+ log.Fatalf("param startTime %s error, %s\n", startTimeStr, err)
}
startTime = t.UnixNano() / 1e6 // as millisecond
} else {
- startTime = DEFAULT_STARTTIME
+ startTime = DefaultStartTime
}
dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)
@@ -154,9 +161,9 @@ func init() {
func main() {
- importConfig := dataimport.LoadConfig(cfg)
+ importConfig := dataImport.LoadConfig(cfg)
- var caseMinumInterval int64 = -1
+ var caseMinInterval int64 = -1
for _, userCase := range strings.Split(cases, ",") {
caseConfig, ok := importConfig.UserCases[userCase]
@@ -168,7 +175,7 @@ func main() {
checkUserCaseConfig(userCase, &caseConfig)
- //read file as map array
+ // read file as map array
fileRows := readFile(caseConfig)
log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))
@@ -177,31 +184,31 @@ func main() {
continue
}
- _, exists := superTableConfigMap[caseConfig.Stname]
+ _, exists := superTableConfigMap[caseConfig.StName]
if !exists {
- superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig}
+ superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig}
} else {
- log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
+ log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName)
}
var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)
// set super table's startTime, cycleTime and avgInterval
- superTableConfigMap[caseConfig.Stname].startTime = start
- superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval
- superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime
+ superTableConfigMap[caseConfig.StName].startTime = start
+ superTableConfigMap[caseConfig.StName].cycleTime = cycleTime
+ superTableConfigMap[caseConfig.StName].avgInterval = avgInterval
- if caseMinumInterval == -1 || caseMinumInterval > avgInterval {
- caseMinumInterval = avgInterval
+ if caseMinInterval == -1 || caseMinInterval > avgInterval {
+ caseMinInterval = avgInterval
}
- startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT)
+ startStr := time.Unix(0, start*int64(time.Millisecond)).Format(StartTimeLayout)
log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
}
- if DEFAULT_DELAY == delay {
+ if DefaultDelay == delay {
// default delay
- delay = caseMinumInterval / 2
+ delay = caseMinInterval / 2
if delay < 1 {
delay = 1
}
@@ -218,7 +225,7 @@ func main() {
createSuperTable(superTableConfigMap)
log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)
- //create sub table
+ // create sub table
start = time.Now()
createSubTable(subTableMap)
log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
@@ -278,7 +285,7 @@ func staticSpeed() {
defer connection.Close()
if save == 1 {
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
if err != nil {
log.Fatalf("create %s Table error: %s\n", saveTable, err)
@@ -294,12 +301,12 @@ func staticSpeed() {
total := getTotalRows(successRows)
currentSuccessRows := total - lastTotalRows
- speed := currentSuccessRows * 1e9 / int64(usedTime)
+ speed := currentSuccessRows * 1e9 / usedTime
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
if save == 1 {
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
- connection.Exec(insertSql)
+ _, _ = connection.Exec(insertSql)
}
lastStaticTime = currentTime
@@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
} else {
// use the sample data primary timestamp
- sort.Sort(fileRows) // sort the file data by the primarykey
+ sort.Sort(fileRows) // sort the file data by the primaryKey
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])
start = minTime // default startTime use the minTime
- if DEFAULT_STARTTIME != startTime {
+ // if a start time was specified, use it instead of the sample minimum
+ if DefaultStartTime != startTime {
start = startTime
}
@@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
return
}
-func createStatisticTable() {
- connection := getConnection()
- defer connection.Close()
-
- _, err := connection.Exec("create table if not exist " + db + "." + saveTable + "(ts timestamp, speed int)")
- if err != nil {
- log.Fatalf("createStatisticTable error: %s\n", err)
- }
-}
-
func createSubTable(subTableMaps map[string]*dataRows) {
connection := getConnection()
defer connection.Close()
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
createTablePrefix := "create table if not exists "
+ var buffer bytes.Buffer
for subTableName := range subTableMaps {
- superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname)
- tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
+ superTableName := getSuperTableName(subTableMaps[subTableName].config.StName)
+ firstRowValues := subTableMaps[subTableName].rows[0] // the first row's values as tags
- buffers := bytes.Buffer{}
- // create table t using supertTable tags(...);
+ // create table t using superTable tags(...);
for i := 0; i < hnum; i++ {
tableName := getScaleSubTableName(subTableName, i)
@@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) {
}
scaleTableNames = append(scaleTableNames, tableName)
- buffers.WriteString(createTablePrefix)
- buffers.WriteString(tableName)
- buffers.WriteString(" using ")
- buffers.WriteString(superTableName)
- buffers.WriteString(" tags(")
+ buffer.WriteString(createTablePrefix)
+ buffer.WriteString(tableName)
+ buffer.WriteString(" using ")
+ buffer.WriteString(superTableName)
+ buffer.WriteString(" tags(")
for _, tag := range subTableMaps[subTableName].config.Tags {
- tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
- buffers.WriteString("'" + tagValue + "'")
- buffers.WriteString(",")
+ tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)])
+ buffer.WriteString("'" + tagValue + "'")
+ buffer.WriteString(",")
}
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(")")
+ buffer.Truncate(buffer.Len() - 1)
+ buffer.WriteString(")")
- createTableSql := buffers.String()
- buffers.Reset()
+ createTableSql := buffer.String()
+ buffer.Reset()
//log.Printf("create table: %s\n", createTableSql)
_, err := connection.Exec(createTableSql)
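
The changes above hoist a single bytes.Buffer out of the per-table loop and rely on Reset(), which empties the buffer while retaining its underlying storage, so repeated statement building stops reallocating. A minimal sketch of the reuse pattern:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buffer bytes.Buffer // one buffer for the whole loop, as in createSubTable
	for _, name := range []string{"t_0", "t_1", "t_2"} {
		buffer.WriteString("create table if not exists ")
		buffer.WriteString(name)
		stmt := buffer.String()
		buffer.Reset() // empties the buffer but keeps its allocated capacity
		fmt.Println(stmt)
	}
}
```
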
@@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
if err != nil {
log.Fatalf("drop database error: %s\n", err)
}
- log.Printf("dropDb: %s\n", dropDbSql)
+ log.Printf("dropdb: %s\n", dropDbSql)
}
createDbSql := "create database if not exists " + db + " " + dbparam
@@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
}
log.Printf("createDb: %s\n", createDbSql)
- connection.Exec("use " + db)
+ _, _ = connection.Exec("use " + db)
prefix := "create table if not exists "
var buffer bytes.Buffer
@@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
createSql := buffer.String()
buffer.Reset()
- //log.Printf("supertable: %s\n", createSql)
+ //log.Printf("superTable: %s\n", createSql)
_, err = connection.Exec(createSql)
if err != nil {
log.Fatalf("create supertable error: %s\n", err)
@@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
}
-func getScaleSubTableName(subTableName string, hnum int) string {
- if hnum == 0 {
+func getScaleSubTableName(subTableName string, hNum int) string {
+ if hNum == 0 {
return subTableName
}
- return fmt.Sprintf("%s_%d", subTableName, hnum)
+ return fmt.Sprintf("%s_%d", subTableName, hNum)
}
-func getSuperTableName(stname string) string {
- return SUPERTABLE_PREFIX + stname
+func getSuperTableName(stName string) string {
+ return SuperTablePrefix + stName
}
/**
@@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime
- subTableName := getSubTableName(tableValue, fileRows.config.Stname)
+ subTableName := getSubTableName(tableValue, fileRows.config.StName)
value, ok := subTableMap[subTableName]
if !ok {
@@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
continue
}
- subTableName := getSubTableName(tableValue, fileRows.config.Stname)
+ subTableName := getSubTableName(tableValue, fileRows.config.StName)
value, ok := currSubTableMap[subTableName]
if !ok {
@@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
}
- var maxRows, tableRows int = 0, 0
+ var maxRows, tableRows = 0, 0
for tableName := range currSubTableMap {
tableRows = len(currSubTableMap[tableName].rows)
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
@@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
}
func getSubTableName(subTableValue string, superTableName string) string {
- return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
+ return SubTablePrefix + subTableValue + "_" + superTableName
}
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
@@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
defer connection.Close()
defer wg.Done()
- connection.Exec("use " + db) // use db
+ _, _ = connection.Exec("use " + db) // use db
log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)
num := 0
subTables := scaleTableNames[start:end]
+ var buffer bytes.Buffer
for {
var currSuccessRows int64
var appendRows int
var lastTableName string
- buffers := bytes.Buffer{}
- buffers.WriteString(INSERT_PREFIX)
+ buffer.WriteString(InsertPrefix)
for _, tableName := range subTables {
subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
subTableRows := int64(len(subTableInfo.rows))
- superTableConf := superTableConfigMap[subTableInfo.config.Stname]
+ superTableConf := superTableConfigMap[subTableInfo.config.StName]
tableStartTime := superTableConf.startTime
var tableEndTime int64
@@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
// append
if lastTableName != tableName {
- buffers.WriteString(tableName)
- buffers.WriteString(" values")
+ buffer.WriteString(tableName)
+ buffer.WriteString(" values")
}
lastTableName = tableName
- buffers.WriteString("(")
- buffers.WriteString(fmt.Sprintf("%v", currentTime))
- buffers.WriteString(",")
+ buffer.WriteString("(")
+ buffer.WriteString(fmt.Sprintf("%v", currentTime))
+ buffer.WriteString(",")
- // fieldNum := len(subTableInfo.config.Fields)
for _, field := range subTableInfo.config.Fields {
- buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
- buffers.WriteString(",")
- // if( i != fieldNum -1){
-
- // }
+ buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
+ buffer.WriteString(",")
}
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(") ")
+ buffer.Truncate(buffer.Len() - 1)
+ buffer.WriteString(") ")
appendRows++
insertRows++
if appendRows == batch {
- // executebatch
- insertSql := buffers.String()
- connection.Exec("use " + db)
+ // executeBatch
+ insertSql := buffer.String()
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows
currSuccessRows += affectedRows
- buffers.Reset()
- buffers.WriteString(INSERT_PREFIX)
+ buffer.Reset()
+ buffer.WriteString(InsertPrefix)
lastTableName = ""
appendRows = 0
}
@@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
// left := len(rows)
if appendRows > 0 {
- // executebatch
- insertSql := buffers.String()
- connection.Exec("use " + db)
+ // executeBatch
+ insertSql := buffer.String()
affectedRows := executeBatchInsert(insertSql, connection)
successRows[threadIndex] += affectedRows
currSuccessRows += affectedRows
- buffers.Reset()
+ buffer.Reset()
}
// log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6)
@@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
}
-func buildSql(rows []tableRows) string {
-
- var lastTableName string
-
- buffers := bytes.Buffer{}
-
- for i, row := range rows {
- if i == 0 {
- lastTableName = row.tableName
- buffers.WriteString(INSERT_PREFIX)
- buffers.WriteString(row.tableName)
- buffers.WriteString(" values")
- buffers.WriteString(row.value)
- continue
- }
-
- if lastTableName == row.tableName {
- buffers.WriteString(row.value)
- } else {
- buffers.WriteString(" ")
- buffers.WriteString(row.tableName)
- buffers.WriteString(" values")
- buffers.WriteString(row.value)
- lastTableName = row.tableName
- }
- }
-
- inserSql := buffers.String()
- return inserSql
-}
-
-func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows {
-
- tableRows := tableRows{tableName: tableName}
-
- buffers := bytes.Buffer{}
-
- buffers.WriteString("(")
- buffers.WriteString(fmt.Sprintf("%v", currentTime))
- buffers.WriteString(",")
-
- for _, field := range subTableInfo.config.Fields {
- buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
- buffers.WriteString(",")
- }
-
- buffers.Truncate(buffers.Len() - 1)
- buffers.WriteString(")")
-
- insertSql := buffers.String()
- tableRows.value = insertSql
-
- return tableRows
-}
-
func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
- result, error := connection.Exec(insertSql)
- if error != nil {
- log.Printf("execute insertSql %s error, %s\n", insertSql, error)
+ result, err := connection.Exec(insertSql)
+ if err != nil {
+ log.Printf("execute insertSql %s error, %s\n", insertSql, err)
return 0
}
affected, _ := result.RowsAffected()
@@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
affected = 0
}
return affected
- // return 0
}
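
The error-to-err rename in this hunk is more than style: using `error` as a variable name shadows Go's builtin error type for the rest of the scope. A small sketch of the pitfall the rename removes; the body mirrors executeBatchInsert, and the comment marks what would break:

```go
package main

import "database/sql"

// execBatch mirrors executeBatchInsert but keeps the old "error" name to
// show the problem: from the := onward, "error" is a local variable, so a
// declaration such as "var e error" in this scope would fail to compile
// ("error is not a type").
func execBatch(db *sql.DB, insertSql string) int64 {
	result, error := db.Exec(insertSql) // shadows the builtin error type
	if error != nil {
		return 0
	}
	affected, _ := result.RowsAffected()
	if affected < 0 {
		affected = 0
	}
	return affected
}

func main() {}
```
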
func getFieldValue(fieldValue interface{}) string {
@@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string {
}
func getConnection() *sql.DB {
- db, err := sql.Open(DRIVER_NAME, dataSourceName)
+ db, err := sql.Open(DriverName, dataSourceName)
if err != nil {
panic(err)
}
@@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string {
return fmt.Sprintf("%v", suffix)
}
-func hash(s string) int {
- v := int(crc32.ChecksumIEEE([]byte(s)))
- if v < 0 {
- return -v
- }
- return v
-}
-
-func readFile(config dataimport.CaseConfig) dataRows {
+func readFile(config dataImport.CaseConfig) dataRows {
fileFormat := strings.ToLower(config.Format)
- if fileFormat == JSON_FORMAT {
+ if fileFormat == JsonFormat {
return readJSONFile(config)
- } else if fileFormat == CSV_FORMAT {
+ } else if fileFormat == CsvFormat {
return readCSVFile(config)
}
@@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows {
return dataRows{}
}
-func readCSVFile(config dataimport.CaseConfig) dataRows {
+func readCSVFile(config dataImport.CaseConfig) dataRows {
var rows dataRows
f, err := os.Open(config.FilePath)
if err != nil {
@@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
line := strings.ToLower(string(lineBytes))
titles := strings.Split(line, config.Separator)
if len(titles) < 3 {
- // need suffix、 primarykey and at least one other field
+ // need suffix, primaryKey and at least one other field
log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
return rows
}
@@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
}
// if the primary key valid
- primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
+ primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
if primaryKeyValue == -1 {
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
continue
@@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
return rows
}
-func readJSONFile(config dataimport.CaseConfig) dataRows {
+func readJSONFile(config dataImport.CaseConfig) dataRows {
var rows dataRows
f, err := os.Open(config.FilePath)
@@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
continue
}
- primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
+ primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
if primaryKeyValue == -1 {
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
continue
@@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
/**
* get primary key as millisecond , otherwise return -1
*/
-func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
+func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
if !existMapKeyAndNotEmpty(key, line) {
return -1
}
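
getPrimaryKeyMilliSec normalizes the configured timestamp column to epoch milliseconds and returns -1 when the value is missing or unparsable, with one branch per TimestampType (millisecond or datetime, per the constants earlier in this diff). A hedged sketch of the two branches; toMillis is an illustrative stand-in, and the layout string is StartTimeLayout from the diff:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// toMillis accepts either an epoch-millisecond string or a formatted
// datetime, and returns -1 on failure.
func toMillis(valueType, value string) int64 {
	switch valueType {
	case "millisecond":
		ms, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return -1
		}
		return ms
	case "datetime":
		// layout matches StartTimeLayout in the const block above
		t, err := time.ParseInLocation("2006-01-02 15:04:05.000", value, time.Local)
		if err != nil {
			return -1
		}
		return t.UnixNano() / 1e6
	}
	return -1
}

func main() {
	fmt.Println(toMillis("millisecond", "1575129600000")) // 1575129600000
	fmt.Println(toMillis("datetime", "2019-12-01 00:00:00.000"))
	fmt.Println(toMillis("millisecond", "not-a-number")) // -1
}
```
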
@@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool {
return true
}
-func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
+func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) {
- if len(caseConfig.Stname) == 0 {
+ if len(caseConfig.StName) == 0 {
log.Fatalf("the stname of case %s can't be empty\n", caseName)
}
- caseConfig.Stname = strings.ToLower(caseConfig.Stname)
+ caseConfig.StName = strings.ToLower(caseConfig.StName)
if len(caseConfig.Tags) == 0 {
log.Fatalf("the tags of case %s can't be empty\n", caseName)
@@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
}
func parseArg() {
- flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.")
- flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
+ flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.")
+ flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
- flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
+ flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval (millisecond) to continue generating data when vnum is set to 0.")
flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
- flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.")
+ flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save is set to 1.")
flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
- flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
- flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.")
- flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
+ flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
+ flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.")
+ flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.")
flag.IntVar(&port, "port", 6030, "tdengine server port.")
flag.StringVar(&user, "user", "root", "user name to login into the database.")
flag.StringVar(&password, "password", "taosdata", "the import tdengine user password")
- flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.")
+ flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 1 is yes and 0 otherwise.")
flag.StringVar(&db, "db", "", "name of the database to store data.")
flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")
@@ -1066,7 +994,7 @@ func printArg() {
fmt.Println("-thread:", thread)
fmt.Println("-batch:", batch)
fmt.Println("-auto:", auto)
- fmt.Println("-start:", starttimestr)
+ fmt.Println("-start:", startTimeStr)
fmt.Println("-interval:", interval)
fmt.Println("-host:", host)
fmt.Println("-port", port)
diff --git a/importSampleData/data/sensor_info.csv b/importSampleData/data/sensor_info.csv
index d049c8b00460cdcc2a1bd5b990ae6efa2aa63bd3..c5ff898118e59dcfc0eb24d03db7b326b5fb9342 100644
--- a/importSampleData/data/sensor_info.csv
+++ b/importSampleData/data/sensor_info.csv
@@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity
8, haerbing, yellow, 2, 1575129697000, 31, 16.321497
8, haerbing, yellow, 2, 1575129698000, 25, 15.864515
8, haerbing, yellow, 2, 1575129699000, 25, 16.492443
-9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889
-9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610
-9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319
-9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807
-9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642
-9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246
-9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774
-9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364
-9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084
-9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289
-9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973
-9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783
-9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101
-9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450
-9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142
-9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837
-9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730
-9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146
-9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187
-9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345
-9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215
-9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621
-9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735
-9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125
-9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485
-9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433
-9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050
-9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168
-9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681
-9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272
-9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976
-9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909
-9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740
-9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645
-9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036
-9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812
-9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939
-9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587
-9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381
-9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530
-9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318
-9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237
-9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297
-9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282
-9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654
-9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736
-9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354
-9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465
-9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276
-9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892
-9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566
-9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849
-9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709
-9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651
-9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347
-9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095
-9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746
-9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939
-9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819
-9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181
-9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922
-9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530
-9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657
-9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854
-9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554
-9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130
-9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404
-9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847
-9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378
-9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507
-9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808
-9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065
-9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803
-9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544
-9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786
-9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420
-9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522
-9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531
-9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744
-9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456
-9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136
-9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523
-9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873
-9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768
-9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610
-9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299
-9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126
-9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302
-9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350
-9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790
-9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568
-9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267
-9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092
-9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021
-9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909
-9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524
-9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127
-9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012
-9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551
-9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868
\ No newline at end of file
+9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889
+9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610
+9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319
+9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807
+9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642
+9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246
+9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774
+9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364
+9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084
+9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289
+9, shijiazhuang, blue, 0, 1575129610000, 21, 15.313973
+9, shijiazhuang, blue, 0, 1575129611000, 27, 18.621783
+9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101
+9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450
+9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142
+9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837
+9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730
+9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146
+9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187
+9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345
+9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215
+9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621
+9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735
+9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125
+9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485
+9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433
+9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050
+9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168
+9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681
+9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272
+9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976
+9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909
+9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740
+9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645
+9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036
+9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812
+9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939
+9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587
+9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381
+9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530
+9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318
+9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237
+9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297
+9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282
+9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654
+9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736
+9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354
+9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465
+9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276
+9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892
+9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566
+9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849
+9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709
+9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651
+9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347
+9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095
+9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746
+9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939
+9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819
+9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181
+9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922
+9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530
+9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657
+9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854
+9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554
+9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130
+9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404
+9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847
+9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378
+9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507
+9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808
+9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065
+9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803
+9, shijiazhuang, blue, 0, 1575129673000, 20, 12.276544
+9, shijiazhuang, blue, 0, 1575129674000, 26, 15.828786
+9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420
+9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522
+9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531
+9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744
+9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456
+9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136
+9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523
+9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873
+9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768
+9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610
+9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299
+9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126
+9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302
+9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350
+9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790
+9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568
+9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267
+9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092
+9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021
+9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909
+9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524
+9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127
+9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012
+9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551
+9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868
\ No newline at end of file
diff --git a/importSampleData/go.mod b/importSampleData/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..fa1d978e597b3eb5b9f35e45f599d5a0f97ff267
--- /dev/null
+++ b/importSampleData/go.mod
@@ -0,0 +1,8 @@
+module github.com/taosdata/TDengine/importSampleData
+
+go 1.13
+
+require (
+ github.com/pelletier/go-toml v1.9.0 // indirect
+ github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
+)
diff --git a/importSampleData/import/import_config.go b/importSampleData/import/import_config.go
index e7942cc5050ae369afe896d0f46a0e242fb7e8f6..fdaeeab7da43968f3484c193e6791cc1e45634d7 100644
--- a/importSampleData/import/import_config.go
+++ b/importSampleData/import/import_config.go
@@ -14,23 +14,23 @@ var (
once sync.Once
)
-// Config inclue all scene import config
+// Config include all scene import config
type Config struct {
UserCases map[string]CaseConfig
}
// CaseConfig include the sample data config and tdengine config
type CaseConfig struct {
- Format string
- FilePath string
- Separator string
- Stname string
- SubTableName string
- Timestamp string
- TimestampType string
- TimestampTypeFormat string
- Tags []FieldInfo
- Fields []FieldInfo
+ Format string
+ FilePath string
+ Separator string
+ StName string
+ SubTableName string
+ Timestamp string
+ TimestampType string
+ TimestampTypeFormat string
+ Tags []FieldInfo
+ Fields []FieldInfo
}
// FieldInfo is field or tag info
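
The renamed StName field is populated from the [usecase] sections of the TOML file by LoadConfig and consumed in main.go. A sketch of a caller reading the loaded config, mirroring main.go's loop; the case name sensor_info is the flag default, and the sketch assumes the importSampleData module is on the module path:

```go
package main

import (
	"log"

	dataImport "github.com/taosdata/TDengine/importSampleData/import"
)

func main() {
	// Each [usecase] section of config/cfg.toml becomes one CaseConfig
	// entry keyed by case name.
	importConfig := dataImport.LoadConfig("config/cfg.toml")

	caseConfig, ok := importConfig.UserCases["sensor_info"]
	if !ok {
		log.Fatal("use case sensor_info not found in config")
	}
	log.Printf("super table %s: %d tags, %d fields",
		caseConfig.StName, len(caseConfig.Tags), len(caseConfig.Fields))
}
```
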
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index d3bd7510a339c7386cdf83ce5806c2e3ad63db8e..3ae4e9941e96abb4c93b99ae86c40b3e3583bd08 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -40,7 +40,7 @@
# ratioOfQueryCores 1.0
# the last_row/first/last aggregator will not change the original column name in the result fields
-# keepColumnName 0
+keepColumnName 1
# number of management nodes in the system
# numOfMnodes 3
@@ -144,11 +144,16 @@
# max length of an SQL
# maxSQLLength 65480
+# max length of WildCards
+# maxWildCardsLength 100
+
# the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
# system time zone
# timezone Asia/Shanghai (CST, +0800)
+# system time zone (for windows 10)
+# timezone UTC-8
# system locale
# locale en_US.UTF-8
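
With keepColumnName set to 1 above, the last_row/first/last aggregators return results under the original column name, as the comment next to the setting states. A hedged sketch of observing that through driver-go; the database, table, and column names are illustrative:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/taosdata/driver-go/taosSql"
)

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:6030)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("select last_row(temperature) from test.sensor")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, _ := rows.Columns()
	fmt.Println(cols) // expect the original name, temperature, when keepColumnName is 1
}
```
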
diff --git a/packaging/check_package.sh b/packaging/check_package.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e4d783d2f917abff1cd2aaff3714ce6c7edd5039
--- /dev/null
+++ b/packaging/check_package.sh
@@ -0,0 +1,245 @@
+#!/bin/bash
+#
+# This file is used to install and verify TDengine packages on linux systems. The
+# operating system is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+
+# -----------------------Variables definition---------------------
+script_dir="../release"
+# Dynamic directory
+data_dir="/var/lib/taos"
+log_dir="/var/log/taos"
+
+data_link_dir="/usr/local/taos/data"
+log_link_dir="/usr/local/taos/log"
+
+cfg_install_dir="/etc/taos"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/taos"
+
+# old bin dir
+sbin_dir="/usr/local/taos/bin"
+
+temp_version=""
+fin_result=""
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:d:" arg
+do
+ case $arg in
+ d)
+ #echo "interactiveFqdn=$OPTARG"
+ script_dir=$( echo $OPTARG )
+ ;;
+ h)
+ echo "Usage: `basename $0` -d scripy_path"
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
+
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function check_file() {
+ #check file whether exists
+ if [ ! -e $1/$2 ];then
+ echo -e "$1/$2 \033[31mnot exists\033[0m!quit"
+ fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
+ echo -e $fin_result
+ exit 8
+ fi
+}
+
+function get_package_name() {
+ var=$1
+ if [[ $1 =~ 'aarch' ]];then
+ echo ${var::-21}
+ else
+ echo ${var::-17}
+ fi
+}
+function check_link() {
+ #check Link whether exists or broken
+ if [ -L $1 ] ; then
+ if [ ! -e $1 ] ; then
+ echo -e "$1 \033[31Broken link\033[0m"
+ fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
+ echo -e $fin_result
+ exit 8
+ fi
+ else
+ echo -e "$1 \033[31mnot exists\033[0m!quit"
+ fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
+ echo -e $fin_result
+ exit 8
+ fi
+}
+
+function check_main_path() {
+ #check install main dir and all sub dir
+ main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
+ for i in ${main_dir[@]};do
+ check_file ${install_main_dir} $i
+ done
+ if [ "$verMode" == "cluster" ]; then
+ nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
+ for i in ${nginx_main_dir[@]};do
+ check_file ${nginx_dir} $i
+ done
+ fi
+ echo -e "Check main path:\033[32mOK\033[0m!"
+}
+
+function check_bin_path() {
+ # check install bin dir and all sub dir
+ bin_dir=("taos" "taosd" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
+ for i in ${bin_dir[@]};do
+ check_file ${sbin_dir} $i
+ done
+ lbin_dir=("taos" "taosd" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
+ for i in ${lbin_dir[@]};do
+ check_link ${bin_link_dir}/$i
+ done
+ if [ "$verMode" == "cluster" ]; then
+ check_file ${nginx_dir}/sbin nginx
+ fi
+ echo -e "Check bin path:\033[32mOK\033[0m!"
+}
+
+
+function check_lib_path() {
+ # check all links
+ check_link ${lib_link_dir}/libtaos.so
+ check_link ${lib_link_dir}/libtaos.so.1
+
+ if [[ -d ${lib64_link_dir} ]]; then
+ check_link ${lib64_link_dir}/libtaos.so
+ check_link ${lib64_link_dir}/libtaos.so.1
+ fi
+ echo -e "Check lib path:\033[32mOK\033[0m!"
+}
+
+
+function check_header_path() {
+ # check all header
+ header_dir=("taos.h" "taoserror.h")
+ for i in ${header_dir[@]};do
+ check_link ${inc_link_dir}/$i
+ done
+ echo -e "Check bin path:\033[32mOK\033[0m!"
+}
+
+
+function check_config_dir() {
+ # check all config
+ check_file ${cfg_install_dir} taos.cfg
+ check_file ${install_main_dir}/cfg taos.cfg.org
+ echo -e "Check conf path:\033[32mOK\033[0m!"
+}
+
+function check_log_path() {
+ # check log path
+ check_file ${log_dir}
+ echo -e "Check log path:\033[32mOK\033[0m!"
+}
+
+function check_data_path() {
+ # check data path
+ check_file ${data_dir}
+ echo -e "Check data path:\033[32mOK\033[0m!"
+}
+
+function install_TDengine() {
+ cd ${script_dir}
+ tar zxf $1
+ temp_version=$(get_package_name $1)
+ cd $(get_package_name $1)
+ echo -e "\033[32muninstall TDengine && install TDengine...\033[0m"
+ rmtaos >/dev/null 2>&1 || echo 'taosd not installed' && echo -e '\n\n' |./install.sh >/dev/null 2>&1
+ echo -e "\033[32mTDengine has been installed!\033[0m"
+ echo -e "\033[32mTDengine is starting...\033[0m"
+ kill_process taos && systemctl start taosd && sleep 10
+}
+
+function test_TDengine() {
+ check_main_path
+ check_bin_path
+ check_lib_path
+ check_header_path
+ check_config_dir
+ check_log_path
+ check_data_path
+ result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
+ if [[ $result =~ "Unable to establish" ]];then
+ echo -e "\033[31mTDengine connect failed\033[0m"
+ fin_result=$fin_result"\033[31m$temp_version\033[0m test failed!\n"
+ echo -e $fin_result
+ exit 8
+ fi
+ echo -e "Check TDengine connect:\033[32mOK\033[0m!"
+ fin_result=$fin_result"\033[32m$temp_version\033[0m test OK!\n"
+}
+# ============================== Main program starts from here ============================
+TD_package_name=`ls ${script_dir}/*server*gz |awk -F '/' '{print $NF}' `
+temp=`pwd`
+for i in $TD_package_name;do
+ if [[ $i =~ 'enterprise' ]];then
+ verMode="cluster"
+ else
+ verMode=""
+ fi
+ cd $temp
+ install_TDengine $i
+ test_TDengine
+done
+echo "============================================================"
+echo -e $fin_result
\ No newline at end of file
diff --git a/packaging/release.sh b/packaging/release.sh
index d7dd7c269cc039eba43f50c52b3599e46dd950c6..5ba6c01a0bd5689278bdb5c86b538b3c447f086a 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os
+# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
@@ -11,7 +11,7 @@ set -e
# -V [stable | beta]
# -l [full | lite]
# -s [static | dynamic]
-# -d [taos | power]
+# -d [taos | power | tq]
# -n [2.0.0.3]
# -m [2.0.0.0]
@@ -22,10 +22,10 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
+dbName=taos # [taos | power | tq]
allocator=glibc # [glibc | jemalloc]
-dbName=taos # [taos | power]
verNumber=""
-verNumberComp="2.0.0.0"
+verNumberComp="1.0.0.0"
while getopts "hv:V:c:o:l:s:d:a:n:m:" arg
do
@@ -78,7 +78,7 @@ do
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
- echo " -d [taos | power] "
+ echo " -d [taos | power | tq ] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
@@ -249,6 +249,10 @@ if [ "$osType" != "Darwin" ]; then
${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp}
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ elif [[ "$dbName" == "tq" ]]; then
+ ${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+ ${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
diff --git a/packaging/tools/install_arbi_tq.sh b/packaging/tools/install_arbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bd852dd0ad2c9114f2424193adccf56b0cb40412
--- /dev/null
+++ b/packaging/tools/install_arbi_tq.sh
@@ -0,0 +1,298 @@
+#!/bin/bash
+#
+# This file is used to install the tarbitrator on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_tq.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install tarbitratord service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+# tq:2345:respawn:/etc/init.d/tarbitratord start
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must stop tarbitrator manually
+ kill_tarbitrator
+ fi
+}
+
+function update_tq() {
+ # Start to update
+ echo -e "${GREEN}Start to update TQ's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mTQ's arbitrator is updated successfully!${NC}"
+}
+
+function install_tq() {
+ # Start to install
+ echo -e "${GREEN}Start to install TQ's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ #echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mTQ's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install server and client
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update_tq
+else
+ install_tq
+fi
+
diff --git a/packaging/tools/install_client_tq.sh b/packaging/tools/install_client_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2537442ee264e9aeb4eb6b3d25a17faf60f4df9a
--- /dev/null
+++ b/packaging/tools/install_client_tq.sh
@@ -0,0 +1,251 @@
+#!/bin/bash
+#
+# This file is used to install the TQ client on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/tq"
+ log_dir="/var/log/tq"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/tq"
+ log_dir=~/TQLog
+fi
+
+log_link_dir="/usr/local/tq/log"
+
+cfg_install_dir="/etc/tq"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+# old bin dir
+bin_dir="/usr/local/tq/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
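+# Note: the grep below matches any process whose command line contains "tq",
+# which may be broader than just the TQ client shell.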
+function kill_client() {
+ pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/tq ] && ${csudo} ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || :
+ if [ "$osType" != "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
+ [ -x ${install_main_dir}/bin/tqdump ] && ${csudo} ln -s ${install_main_dir}/bin/tqdump ${bin_link_dir}/tqdump || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_tq.sh ${bin_link_dir}/rmtq || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ ${csudo} rm -f /usr/lib/libtaos.* || :
+ ${csudo} rm -rf ${lib_dir} || :
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function update_tq() {
+ # Start to update
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to update TQ client...${NC}"
+ # Stop the client shell if running
+ if pidof tq &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+function install_tq() {
+ # Start to install
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to install TQ client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, don't install the client
+if [ -e ${bin_dir}/tqd ]; then
+    echo -e "\033[44;32;1mTQ server is already installed, no need to install the client!${NC}"
+    exit 0
+fi
+
+if [ -x ${bin_dir}/tq ]; then
+    update_flag=1
+    update_tq
+else
+    install_tq
+fi
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..52e08cb6b0d00b25686b87e2f066401e0388d4ce
--- /dev/null
+++ b/packaging/tools/install_tq.sh
@@ -0,0 +1,977 @@
+#!/bin/bash
+#
+# This file is used to install the TQ database on Linux systems. The operating
+# system is required to use systemd to manage services at boot.
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/tq"
+log_dir="/var/log/tq"
+
+data_link_dir="/usr/local/tq/data"
+log_link_dir="/usr/local/tq/log"
+
+cfg_install_dir="/etc/tq"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+# old bin dir
+bin_dir="/usr/local/tq/bin"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
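+# Detect the service manager. service_mod: 0 => systemd, 1 => sysvinit
+# (initd_mod 1/2/3 selects chkconfig, insserv or update-rc.d), 2 => none,
+# in which case processes are started and stopped manually.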
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif which service &> /dev/null; then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if which chkconfig &> /dev/null; then
+ initd_mod=1
+ elif which insserv &> /dev/null; then
+ initd_mod=2
+ elif which update-rc.d &> /dev/null; then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(grep "NAME" /etc/os-release | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified Linux distribution;"
+ echo " if there are any problems with installation or operation,"
+ echo " please feel free to contact taosdata.com for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+ ?) #unknown option
+ echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
+#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
+
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqd || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/tq ] && ${csudo} ln -s ${install_main_dir}/bin/tq ${bin_link_dir}/tq || :
+ [ -x ${install_main_dir}/bin/tqd ] && ${csudo} ln -s ${install_main_dir}/bin/tqd ${bin_link_dir}/tqd || :
+ [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
+ [ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ #if [ "$verMode" == "cluster" ]; then
+ # # Compatible with version 1.5
+ # ${csudo} mkdir -p ${v15_java_app_dir}
+ # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
+ # ${csudo} chmod 777 ${v15_java_app_dir} || :
+ #fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
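+# Append "127.0.0.1 <hostname>" to /etc/hosts unless an entry for that
+# hostname already resolves to 127.0.0.1.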
+function add_newHostname_to_hosts() {
+ localIp="127.0.0.1"
+ OLD_IFS="$IFS"
+ IFS=" "
+ iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
+ arr=($iphost)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$localIp" ]]; then
+ return
+ fi
+ done
+ ${csudo} bash -c "echo '127.0.0.1 $1' >> /etc/hosts" ||:
+}
+
+function set_hostname() {
+ echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:"
+ read newHostname
+ while true; do
+ if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
+ break
+ else
+ read -p "Please enter one hostname(must not be 'localhost'):" newHostname
+ fi
+ done
+
+ ${csudo} hostname $newHostname ||:
+ retval=$?
+ if [[ $retval != 0 ]]; then
+ echo
+ echo "Failed to set hostname!"
+ return
+ fi
+ #echo -e -n "$(hostnamectl status --static)"
+ #echo -e -n "$(hostnamectl status --transient)"
+ #echo -e -n "$(hostnamectl status --pretty)"
+
+ #ubuntu/centos /etc/hostname
+ if [[ -e /etc/hostname ]]; then
+ ${csudo} bash -c "echo '$newHostname' > /etc/hostname" ||:
+ fi
+
+ #debian: #HOSTNAME=yourname
+ if [[ -e /etc/sysconfig/network ]]; then
+ ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
+ fi
+
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$newHostname
+
+ if [[ -e /etc/hosts ]]; then
+ add_newHostname_to_hosts $newHostname
+ fi
+}
+
+function is_correct_ipaddr() {
+ newIp=$1
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($iplist)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$newIp" ]]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
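+# Fall back to a local IP address as the FQDN: collect candidates via
+# `ip address` (or ifconfig on older systems), let the user pick one, and
+# write it as the fqdn value in taos.cfg.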
+function set_ipAsFqdn() {
+ iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
+ if [ -z "$iplist" ]; then
+ iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
+ fi
+
+ if [ -z "$iplist" ]; then
+ echo
+ echo -e -n "${GREEN}Unable to get local IP, using 127.0.0.1${NC}"
+ localFqdn="127.0.0.1"
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ echo
+ return
+ fi
+
+ echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
+ echo
+ echo -e -n "${GREEN}$iplist${NC}"
+ echo
+ echo
+ echo -e -n "${GREEN}Note: if an IP address is used as the node name, data can NOT be migrated to another machine directly${NC}:"
+ read localFqdn
+ while true; do
+ if [ ! -z "$localFqdn" ]; then
+ # Check if correct ip address
+ is_correct_ipaddr $localFqdn
+ retval=$?
+ if [[ $retval != 0 ]]; then
+ read -p "Please choose an IP from local IP list:" localFqdn
+ else
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ break
+ fi
+ else
+ read -p "Please choose an IP from local IP list:" localFqdn
+ fi
+ done
+}
+
+function local_fqdn_check() {
+ #serverFqdn=$(hostname)
+ echo
+ echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
+ echo
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+ if [ ! -n "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
+ fi
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ [ ! -z $1 ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ local_fqdn_check
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+ # first fully qualified domain name (FQDN) for the TQ cluster
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TQ cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tqd &> /dev/null; then
+ ${csudo} service tqd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} chkconfig --del tqd || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} insserv -r tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} update-rc.d -f tqd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tqd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if which init &> /dev/null; then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install tqd service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tqd.deb ${install_main_dir}/init.d/tqd
+ ${csudo} cp ${script_dir}/init.d/tqd.deb ${service_config_dir}/tqd && ${csudo} chmod a+x ${service_config_dir}/tqd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tqd.rpm ${install_main_dir}/init.d/tqd
+ ${csudo} cp ${script_dir}/init.d/tqd.rpm ${service_config_dir}/tqd && ${csudo} chmod a+x ${service_config_dir}/tqd
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tqd || :
+ ${csudo} chkconfig --level 2345 tqd on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tqd || :
+ ${csudo} insserv -d tqd || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tqd defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tqd_service_config="${service_config_dir}/tqd.service"
+ if systemctl is-active --quiet tqd; then
+ echo "TQ is running, stopping it..."
+ ${csudo} systemctl stop tqd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tqd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tqd_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for TQ is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+# tq:2345:respawn:/etc/init.d/tqd start
+
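+# Note: the unit files below are assembled one echo line at a time via
+# ${csudo} bash -c "echo ... >>". A single heredoc would produce the same
+# result; a minimal sketch (same ${tqd_service_config} path assumed):
+#   ${csudo} bash -c "cat > ${tqd_service_config}" <<'EOF'
+#   [Unit]
+#   Description=TQ server service
+#   ...
+#   EOF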
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tqd_service_config="${service_config_dir}/tqd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ server service' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo >> ${tqd_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tqd' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'ExecStartPre=/usr/local/tq/bin/startPre.sh' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo >> ${tqd_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tqd_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tqd_service_config}"
+ ${csudo} systemctl enable tqd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=TQ arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Description=Nginx For TQ Service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+ # must stop tqd manually
+ kill_process tqd
+ fi
+}
+
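+# vercomp compares two dotted version strings field by field, forcing
+# base-10 so that e.g. "2.0.20" > "2.0.9". Return codes: 0 if $1 == $2,
+# 1 if $1 > $2, 2 if $1 < $2.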
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
+
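+# Refuse the update (return 1) only when the packaged libtaos version is
+# strictly older than the minimum compatible version shipped in
+# driver/vercomp.txt (falling back to the version reported by `tqd -V`).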
+function is_version_compatible() {
+
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/tqd -V | head -1 | cut -d ' ' -f 5)
+ fi
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
+
+function update_tq() {
+ # Start to update
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+ install_jemalloc
+
+ # Check if version compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update TQ...${NC}"
+ # Stop the service if running
+ if pidof tqd &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tqd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tqd stop || :
+ else
+ kill_process tqd
+ fi
+ sleep 1
+ fi
+ if [ "$verMode" == "cluster" ]; then
+ if pidof nginx &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop nginxd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service nginxd stop || :
+ else
+ kill_process nginx
+ fi
+ sleep 1
+ fi
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ if [ -z $1 ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for TQ is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} service tqd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ./tqd${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq -h $serverFqdn${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mTQ is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+function install_tq() {
+ # Start to install
+ if [ ! -e tq.tar.gz ]; then
+ echo "File tq.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf tq.tar.gz
+
+ echo -e "${GREEN}Start to install TQ...${NC}"
+
+ install_main_path
+
+ if [ -z $1 ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+
+ if [ -z $1 ]; then # install service and client
+ # For installing new
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for TQ is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for TQ does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+ # Ask if to start the service
+ #echo
+ #echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure TQ ${NC}: edit /etc/tq/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} systemctl start tqd${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start TQ ${NC}: ${csudo} service tqd start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start TQ ${NC}: tqd${NC}"
+ fi
+
+ #if [ ${openresty_work} = 'true' ]; then
+ # echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ #else
+ # echo -e "${GREEN_DARK}To access TQ ${NC}: use ${GREEN_UNDERLINE}tq${NC} in shell${NC}"
+ #fi
+
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
+ else
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! -z "$serverFqdn" ]; then
+ echo -e "${GREEN_DARK}To access TQ ${NC}: tq -h $serverFqdn${GREEN_DARK} to log in to the TQ server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mTQ is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mTQ client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf tq.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+serverFqdn=$(hostname)
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/tqd ]; then
+ update_flag=1
+ update_tq
+ else
+ install_tq
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/tq ]; then
+ update_flag=1
+ update_tq client
+ else
+ install_tq client
+ fi
+else
+ echo "please enter a correct verType: server or client"
+fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 842963fb920e6596c4e4887806997eaeba8d93ea..d400d0b91a2d02e9b3e0232d67e2ed6b00cdf541 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -19,35 +19,35 @@ else
fi
# Dynamic directory
-data_dir="/var/lib/taos"
if [ "$osType" != "Darwin" ]; then
+ data_dir="/var/lib/taos"
log_dir="/var/log/taos"
-else
- log_dir=~/TDengine/log
-fi
-
-data_link_dir="/usr/local/taos/data"
-log_link_dir="/usr/local/taos/log"
-cfg_install_dir="/etc/taos"
+ cfg_install_dir="/etc/taos"
-if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
+
+ install_main_dir="/usr/local/taos"
+
+ bin_dir="/usr/local/taos/bin"
else
+ data_dir="/usr/local/var/lib/taos"
+ log_dir="/usr/local/var/log/taos"
+
+ cfg_install_dir="/usr/local/etc/taos"
+
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
-fi
-#install main path
-install_main_dir="/usr/local/taos"
+ install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
-# old bin dir
-bin_dir="/usr/local/taos/bin"
+ bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin"
+fi
service_config_dir="/etc/systemd/system"
@@ -59,12 +59,11 @@ GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
-if command -v sudo > /dev/null; then
- csudo="sudo"
-fi
if [ "$osType" != "Darwin" ]; then
-
+ if command -v sudo > /dev/null; then
+ csudo="sudo"
+ fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
@@ -137,17 +136,17 @@ function install_main_path() {
function install_bin() {
# Remove links
- ${csudo} rm -f ${bin_link_dir}/taos || :
+ ${csudo} rm -f ${bin_link_dir}/taos || :
+ ${csudo} rm -f ${bin_link_dir}/taosd || :
+ ${csudo} rm -f ${bin_link_dir}/taosdemo || :
+ ${csudo} rm -f ${bin_link_dir}/taosdump || :
if [ "$osType" != "Darwin" ]; then
- ${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
+ ${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/set_core || :
+ ${csudo} rm -f ${bin_link_dir}/rmtaos || :
fi
-
- ${csudo} rm -f ${bin_link_dir}/rmtaos || :
-
+
${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
@@ -161,21 +160,21 @@ function install_bin() {
${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
- [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
+ [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
+ [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
+ [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
+ [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
if [ "$osType" != "Darwin" ]; then
- [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
- [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
- [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
fi
-
+
if [ "$osType" != "Darwin" ]; then
- [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
- else
- [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
+ [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
fi
}
+
function install_jemalloc() {
if [ "$osType" != "Darwin" ]; then
/usr/bin/install -c -d /usr/local/bin
@@ -219,7 +218,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -244,11 +243,12 @@ function install_lib() {
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
- ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
+
install_jemalloc
if [ "$osType" != "Darwin" ]; then
@@ -258,10 +258,14 @@ function install_lib() {
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ fi
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
- ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
- ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+ fi
}
function install_config() {
@@ -269,23 +273,20 @@ function install_config() {
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/../cfg/taos.cfg ] &&
+ ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ fi
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
-
- if [ "$osType" != "Darwin" ]; then
- ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
- else
- mkdir -p ${log_dir} && chmod 777 ${log_dir}
- fi
-
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
@@ -306,7 +307,6 @@ function install_connector() {
echo "WARNING: go connector not found, please check if want to use it!"
fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
-
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
}
@@ -420,7 +420,7 @@ function install_service() {
}
function update_TDengine() {
- echo -e "${GREEN}Start to update TDEngine...${NC}"
+ echo -e "${GREEN}Start to update TDengine...${NC}"
# Stop the service if running
if [ "$osType" != "Darwin" ]; then
@@ -486,24 +486,21 @@ function install_TDengine() {
else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
fi
-
+
install_main_path
- if [ "$osType" != "Darwin" ]; then
- install_data
- fi
+ install_data
install_log
install_header
install_lib
install_connector
install_examples
-
install_bin
-
+
if [ "$osType" != "Darwin" ]; then
install_service
fi
-
+
install_config
if [ "$osType" != "Darwin" ]; then
diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh
index 5346a79c8fb6eada8ad0f0c5e59fdc1690466839..6dcabc2a0622e5fec67431c8663541a2b40048e1 100755
--- a/packaging/tools/makearbi.sh
+++ b/packaging/tools/makearbi.sh
@@ -59,7 +59,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
diff --git a/packaging/tools/makearbi_tq.sh b/packaging/tools/makearbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c10dfec255d411965a3887942e5d2aded4635979
--- /dev/null
+++ b/packaging/tools/makearbi_tq.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Generate arbitrator's tar.gz setup package for all os system
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-arbitrator-${version}"
+else
+ install_dir="${release_dir}/TQ-arbitrator-${version}"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_tq.sh"
+install_files="${script_dir}/install_arbi_tq.sh"
+
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_tq.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+if ! tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit 1
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index 97e80088bea0dfedb5967071787b54380f89c170..8fc431bfbc66d4f9d482ab5885d282081139ef4d 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -182,7 +182,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 31a8cff7ae327accd995d28386fc89bc9b8716d2..89591cac234b190f55d144ccf98cb2d5c70a7936 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -124,6 +124,39 @@ else
fi
chmod a+x ${install_dir}/bin/* || :
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+ mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+ cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+ if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+ cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/bin/jeprof ]; then
+ cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+ cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+ cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+ ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+ fi
+ if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+ fi
+ if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+ cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+ fi
+fi
+
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..03d9b13059daadfdc7207c78b6f89cae321f25ac
--- /dev/null
+++ b/packaging/tools/makeclient_tq.sh
@@ -0,0 +1,236 @@
+#!/bin/bash
+#
+# Generate tar.gz package for the client on all OS systems
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-client-${version}"
+else
+ install_dir="${release_dir}/TQ-client-${version}"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+# if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/tqd
+# strip ${build_dir}/bin/tq
+# bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
+# else
+# bin_files="${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${script_dir}/remove_client_tq.sh ${script_dir}/set_core.sh"
+# fi
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/tq ${script_dir}/remove_client_tq.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
+install_files="${script_dir}/install_client_tq.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+ mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+ cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+ if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+ cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/bin/jeprof ]; then
+ cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+ cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+ cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+ ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+ fi
+ if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+ fi
+ if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+ cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+ fi
+fi
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f tq.tar.gz * --remove-files || :
+else
+ tar -zcv -f tq.tar.gz * || :
+ mv tq.tar.gz ..
+ rm -rf ./*
+ mv ../tq.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_tq.sh > install_client_tq_temp.sh
+ mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_tq.sh > install_client_tq_temp.sh
+ mv install_client_tq_temp.sh ${install_dir}/install_client_tq.sh
+fi
+chmod a+x ${install_dir}/install_client_tq.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ if [ "$osType" != "Darwin" ]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+ fi
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stable or beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
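The new script reads eight positional arguments; a hypothetical invocation, with all paths and values illustrative:

    # $1 compile_dir  $2 version  $3 build_time  $4 cpuType
    # $5 osType       $6 verMode  $7 verType     $8 pagMode
    ./makeclient_tq.sh /home/user/TDengine/debug 2.2.0.0 "2021-08-01" x64 Linux edge stable full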
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 56ab24426f687369870aa8729193c8021b0fe510..e9266ec80da293571ece07dab9c724b5b8c12adf 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -35,7 +35,7 @@ fi
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
- bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
+ bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
@@ -215,7 +215,7 @@ pkg_name=${install_dir}-${osType}-${cpuType}
# exit 1
# fi
-if [ "$verType" == "beta" ]; then
+if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6f897de0ce5e7287e06719562199e8ed139b02ec
--- /dev/null
+++ b/packaging/tools/makepkg_tq.sh
@@ -0,0 +1,224 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/TQ-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/TQ-server-${version}"
+fi
+
+# Directories and files.
+#if [ "$pagMode" == "lite" ]; then
+# strip ${build_dir}/bin/taosd
+# strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
+#else
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh\
+# ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb"
+#fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_tq.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# Init file
+#init_dir=${script_dir}/deb
+#if [ $package_type = "centos" ]; then
+# init_dir=${script_dir}/rpm
+#fi
+#init_files=${init_dir}/tqd
+# temporarily use rpm's tqd. TODO: modify later according to OS type
+#init_file_deb=${script_dir}/../deb/tqd
+#init_file_rpm=${script_dir}/../rpm/tqd
+#init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+#init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${script_dir}/remove_tq.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+else
+# bin_files="${build_dir}/bin/tqd ${build_dir}/bin/tq ${build_dir}/bin/tqdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_tq.sh ${script_dir}/set_core.sh"
+ cp ${build_dir}/bin/taos ${install_dir}/bin/tq
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
+ cp ${script_dir}/remove_tq.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/startPre.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+#mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/tqd.deb
+#mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/tqd.rpm
+#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+#mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_tq.sh > remove_tq_temp.sh
+ mv remove_tq_temp.sh ${install_dir}/bin/remove_tq.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+ sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/TQ/g" ${install_dir}/nginxd/admin/js/*.js
+
+ sed -i '/dataDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+ sed -i '/logDir/ {s/taos/tq/g}' ${install_dir}/cfg/taos.cfg
+ sed -i "s/TDengine/TQ/g" ${install_dir}/cfg/taos.cfg
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+cd ${install_dir}
+exitcode=0
+tar -zcv -f tq.tar.gz * --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar tq.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_tq.sh > install_tq_temp.sh
+ mv install_tq_temp.sh ${install_dir}/install_tq.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_tq.sh > install_tq_temp.sh
+ mv install_tq_temp.sh ${install_dir}/install_tq.sh
+fi
+chmod a+x ${install_dir}/install_tq.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/matlab/TDengineDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ sed -i '/root/ {s/taosdata/tqueue/g}' ${install_dir}/examples/go/taosdemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+ echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+ echo "WARNING: go connector not found, please check if want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/cinterface.py
+
+ sed -i '/password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/tqueue/g}' ${install_dir}/connector/python/taos/connection.py
+fi
+# Copy release note
+# cp ${script_dir}/release_note ${install_dir}
+
+# exit 1
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+ echo "unknow verMode, nor cluster or edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknow verType, nor stabel or beta"
+ exit 1
+fi
+
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
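makepkg_tq.sh takes the same arguments as makeclient_tq.sh plus a ninth, versionComp, which is recorded in driver/vercomp.txt inside the package; a hypothetical invocation:

    # $1..$8 as in makeclient_tq.sh, plus $9 versionComp
    ./makepkg_tq.sh /home/user/TDengine/debug 2.2.0.0 "2021-08-01" x64 Linux cluster beta full 2.0.0.0
    # expected archive name under these values:
    #   TQ-enterprise-server-2.2.0.0-Linux-x64-beta.tar.gz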
diff --git a/packaging/tools/remove_arbi_tq.sh b/packaging/tools/remove_arbi_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3d99b6d41a74938d74383df3d8cdfc75c2ebb7c8
--- /dev/null
+++ b/packaging/tools/remove_arbi_tq.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall TQ's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif which service &> /dev/null; then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if which chkconfig &> /dev/null; then
+ initd_mod=1
+ elif which insserv &> /dev/null; then
+ initd_mod=2
+ elif which update-rc.d &> /dev/null; then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TQ tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "TQ's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if which init &> /dev/null; then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ # must stop tarbitrator manually
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TQ's arbitrator is removed successfully!${NC}"
+echo
\ No newline at end of file
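The probe at the top of this script decides how the uninstall stops the service; a standalone sketch of the same decision, assuming which(1) is available:

    #!/bin/bash
    # Sketch: pick the cleanup path the way remove_arbi_tq.sh does.
    if pidof systemd &> /dev/null; then
      echo "service_mod=0 -> clean_service_on_systemd"
    elif which service &> /dev/null; then
      echo "service_mod=1 -> clean_service_on_sysvinit"
    else
      echo "service_mod=2 -> kill_tarbitrator directly"
    fi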
diff --git a/packaging/tools/remove_client_tq.sh b/packaging/tools/remove_client_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ad8056c18cc32623edb8b77bf6aa17070acc1cbc
--- /dev/null
+++ b/packaging/tools/remove_client_tq.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# Script to stop the TQ client and uninstall it, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tq"
+
+log_link_dir="/usr/local/tq/log"
+cfg_link_dir="/usr/local/tq/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+ #pid=$(ps -ef | grep "tq" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$(pidof tq)" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}TQ client is removed successfully!${NC}"
+echo
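A quick post-uninstall sanity check; illustrative only, with the link list mirroring clean_bin above:

    #!/bin/bash
    # Sketch: report any symlink the uninstall should have removed.
    for f in /usr/bin/tq /usr/bin/tqdemo /usr/bin/tqdump /usr/bin/rmtq /usr/bin/set_core; do
      if [ -e "$f" ]; then
        echo "leftover: $f"
      fi
    done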
diff --git a/packaging/tools/remove_tq.sh b/packaging/tools/remove_tq.sh
new file mode 100755
index 0000000000000000000000000000000000000000..211eed4dff09ab5da00d5c475cd93148b5ce1b24
--- /dev/null
+++ b/packaging/tools/remove_tq.sh
@@ -0,0 +1,227 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall TQ, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tq"
+data_link_dir="/usr/local/tq/data"
+log_link_dir="/usr/local/tq/log"
+cfg_link_dir="/usr/local/tq/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+# v1.5 jar dir
+#v15_java_app_dir="/usr/local/lib/tq"
+
+service_config_dir="/etc/systemd/system"
+tq_service_name="tqd"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif which service &> /dev/null; then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if which chkconfig &> /dev/null; then
+ initd_mod=1
+ elif which insserv &> /dev/null; then
+ initd_mod=2
+ elif which update-rc.d &> /dev/null; then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tqd() {
+ pid=$(ps -ef | grep "tqd" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tq || :
+ ${csudo} rm -f ${bin_link_dir}/tqd || :
+ ${csudo} rm -f ${bin_link_dir}/tqdemo || :
+ ${csudo} rm -f ${bin_link_dir}/tqdump || :
+ ${csudo} rm -f ${bin_link_dir}/rmtq || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ tq_service_config="${service_config_dir}/${tq_service_name}.service"
+ if systemctl is-active --quiet ${tq_service_name}; then
+ echo "TQ tqd is running, stopping it..."
+ ${csudo} systemctl stop ${tq_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tq_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tq_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "TDengine tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+ if [ -d ${install_nginxd_dir} ]; then
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for TDengine is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="tq:2345:respawn:${service_config_dir}/tqd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tqd &> /dev/null; then
+ echo "TQ tqd is running, stopping it..."
+ ${csudo} service tqd stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "TQ tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} chkconfig --del tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} insserv -r tqd || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tqd ]; then
+ ${csudo} update-rc.d -f tqd remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tqd || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if which init &> /dev/null; then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+ # must stop tqd and tarbitrator manually
+ kill_tqd
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+#if echo $osinfo | grep -qwi "ubuntu" ; then
+## echo "this is ubuntu system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "debian" ; then
+## echo "this is debian system"
+# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+#elif echo $osinfo | grep -qwi "centos" ; then
+## echo "this is centos system"
+# ${csudo} rpm -e --noscripts tdengine || :
+#fi
+
+echo -e "${GREEN}TQ is removed successfully!${NC}"
+echo
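On a systemd host, a minimal check that the uninstall also unregistered the units; hypothetical session:

    # Both units should now be unknown to systemd.
    systemctl status tqd tarbitratord 2>&1 | grep -i "could not be found" || true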
diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh
old mode 100644
new mode 100755
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 64096baacd795bb5c689c2e73d12b5b660eb7315..e4fdae558c3282eee4d2c6d0591b01fff53a1f98 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core18
-version: '2.0.20.17'
+version: '2.2.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.20.17
+ - usr/lib/libtaos.so.2.2.0.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c
index 7d94df1c23ab7824dbada0423beec14530a2101c..04a14357c9e602807f5aa254d8a5ea25bc4b328d 100644
--- a/src/balance/src/bnScore.c
+++ b/src/balance/src/bnScore.c
@@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
- tsBnDnodes.maxSize = dnodesNum * 2;
- tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
+ int32_t maxSize = dnodesNum * 2;
+ SDnodeObj** list1 = NULL;
+ int32_t retry = 0;
+
+ while(list1 == NULL && retry++ < 3) { // retry realloc a few times before giving up
+ list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
+ }
+ if(list1) { // adopt the new list only on success; otherwise keep the old one and its size
+ tsBnDnodes.list = list1;
+ tsBnDnodes.maxSize = maxSize;
+ }
}
}
diff --git a/src/balance/src/bnThread.c b/src/balance/src/bnThread.c
index 44cb24effa09688db79bb9ae8fa40a381c0c0404..20da83ccba4c192a733cdbb530a2b6aab358896a 100644
--- a/src/balance/src/bnThread.c
+++ b/src/balance/src/bnThread.c
@@ -23,6 +23,8 @@
static SBnThread tsBnThread;
static void *bnThreadFunc(void *arg) {
+ setThreadName("balance");
+
while (1) {
pthread_mutex_lock(&tsBnThread.mutex);
if (tsBnThread.stop) {
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index bdd42cb457875fe2d554fc4c65636910806af382..0d06e5d39c0ed1916e0c2af7ccce5918e31ac42f 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -4,6 +4,8 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(jni)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
@@ -11,7 +13,7 @@ IF (TD_LINUX)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt)
+ TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt ${VAR_TSZ})
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
@@ -21,7 +23,7 @@ IF (TD_LINUX)
IF (TD_LINUX_64)
TARGET_LINK_LIBRARIES(taos lua)
ENDIF ()
-
+
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .so
@@ -37,13 +39,13 @@ ELSEIF (TD_DARWIN)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m)
+ TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m)
+ TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
@@ -68,19 +70,19 @@ ELSEIF (TD_WINDOWS)
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
- TARGET_LINK_LIBRARIES(taos trpc tutil query)
+ TARGET_LINK_LIBRARIES(taos trpc tutil query lua)
ELSEIF (TD_DARWIN)
SET(CMAKE_MACOSX_RPATH 1)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m)
+ TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m)
+ TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
diff --git a/src/client/inc/tscGlobalmerge.h b/src/client/inc/tscGlobalmerge.h
new file mode 100644
index 0000000000000000000000000000000000000000..875bb5e178d1d0f50b78b4b6c0cf6ae29b884a1a
--- /dev/null
+++ b/src/client/inc/tscGlobalmerge.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TSCGLOBALMERGE_H
+#define TDENGINE_TSCGLOBALMERGE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "qExtbuffer.h"
+#include "qFill.h"
+#include "taosmsg.h"
+#include "tlosertree.h"
+#include "qExecutor.h"
+
+#define MAX_NUM_OF_SUBQUERY_RETRY 3
+
+struct SQLFunctionCtx;
+
+typedef struct SLocalDataSource {
+ tExtMemBuffer *pMemBuffer;
+ int32_t flushoutIdx;
+ int32_t pageId;
+ int32_t rowIdx;
+ tFilePage filePage;
+} SLocalDataSource;
+
+typedef struct SGlobalMerger {
+ SLocalDataSource **pLocalDataSrc;
+ int32_t numOfBuffer;
+ int32_t numOfCompleted;
+ int32_t numOfVnode;
+ SLoserTreeInfo *pLoserTree;
+ int32_t rowSize; // size of each intermediate result.
+ tOrderDescriptor *pDesc;
+ tExtMemBuffer **pExtMemBuffer; // disk-based buffer
+ char *buf; // temp buffer
+} SGlobalMerger;
+
+struct SSqlObj;
+
+typedef struct SRetrieveSupport {
+ tExtMemBuffer ** pExtMemBuffer; // for build loser tree
+ tOrderDescriptor *pOrderDescriptor;
+ int32_t subqueryIndex; // index of current vnode in vnode list
+ struct SSqlObj *pParentSql;
+ tFilePage * localBuffer; // temp buffer; one buffer for each vnode
+ uint32_t localBufferSize;
+ uint32_t numOfRetry; // record the number of retry times
+} SRetrieveSupport;
+
+int32_t tscCreateGlobalMergerEnv(SQueryInfo* pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub, tOrderDescriptor **pDesc, uint32_t nBufferSize, int64_t id);
+
+void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes);
+
+int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
+ int32_t numOfRows, int32_t orderType);
+
+int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType);
+
+/*
+ * create the global merger to launch the second-stage merge process at the client side
+ */
+int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
+ SQueryInfo *pQueryInfo, SGlobalMerger **pMerger, int64_t id);
+
+void tscDestroyGlobalMerger(SGlobalMerger* pMerger);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_TSCGLOBALMERGE_H
diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h
deleted file mode 100644
index 581cd37cbd53cb87847fc5a13c88b03eb797d93a..0000000000000000000000000000000000000000
--- a/src/client/inc/tscLocalMerge.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TDENGINE_TSCLOCALMERGE_H
-#define TDENGINE_TSCLOCALMERGE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "qExtbuffer.h"
-#include "qFill.h"
-#include "taosmsg.h"
-#include "tlosertree.h"
-#include "tsclient.h"
-
-#define MAX_NUM_OF_SUBQUERY_RETRY 3
-
-struct SQLFunctionCtx;
-
-typedef struct SLocalDataSource {
- tExtMemBuffer *pMemBuffer;
- int32_t flushoutIdx;
- int32_t pageId;
- int32_t rowIdx;
- tFilePage filePage;
-} SLocalDataSource;
-
-typedef struct SLocalMerger {
- SLocalDataSource ** pLocalDataSrc;
- int32_t numOfBuffer;
- int32_t numOfCompleted;
- int32_t numOfVnode;
- SLoserTreeInfo * pLoserTree;
- char * prevRowOfInput;
- tFilePage * pResultBuf;
- int32_t nResultBufSize;
- tFilePage * pTempBuffer;
- struct SQLFunctionCtx *pCtx;
- int32_t rowSize; // size of each intermediate result.
- bool hasPrevRow; // cannot be released
- bool hasUnprocessedRow;
- tOrderDescriptor * pDesc;
- SColumnModel * resColModel;
- SColumnModel* finalModel;
- tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
- SFillInfo* pFillInfo; // interpolation support structure
- char* pFinalRes; // result data after interpo
- tFilePage* discardData;
- bool discard;
- int32_t offset; // limit offset value
- bool orderPrjOnSTable; // projection query on stable
-} SLocalMerger;
-
-typedef struct SRetrieveSupport {
- tExtMemBuffer ** pExtMemBuffer; // for build loser tree
- tOrderDescriptor *pOrderDescriptor;
- SColumnModel* pFinalColModel; // colModel for final result
- SColumnModel* pFFColModel;
- int32_t subqueryIndex; // index of current vnode in vnode list
- SSqlObj * pParentSql;
- tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
- uint32_t numOfRetry; // record the number of retry times
-} SRetrieveSupport;
-
-int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
- SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSize);
-
-void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel* pFFModel,
- int32_t numOfVnodes);
-
-int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
- int32_t numOfRows, int32_t orderType);
-
-int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType);
-
-/*
- * create local reducer to launch the second-stage reduce process at client site
- */
-void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
- SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
-
-void tscDestroyLocalMerger(SSqlObj *pSql);
-
-int32_t tscDoLocalMerge(SSqlObj *pSql);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // TDENGINE_TSCLOCALMERGE_H
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index 15ef54b7b13eb8463c54a7b979eb6d007560bb0e..a012ca5a7fe741b8859465504cbc971a7e46952c 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -48,6 +48,14 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
+int tsInsertInitialCheck(SSqlObj *pSql);
+
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
+
+void tscFreeRetrieveSup(SSqlObj *pSql);
+
+
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index a26f03003e3921539934fc7806a468b663535226..fcf61580c8967756614efdce55010fca920b12c2 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -25,17 +25,22 @@ extern "C" {
#include "qExtbuffer.h"
#include "taosdef.h"
#include "tbuffer.h"
-#include "tscLocalMerge.h"
+#include "tscGlobalmerge.h"
+#include "tsched.h"
#include "tsclient.h"
-#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
+#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))
+
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
-
-#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
+
+#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
+#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
+ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
+
#pragma pack(push,1)
// this struct is transfered as binary, padding two bytes to avoid
// an 'uid' whose low bytes is 0xff being recoginized as NULL,
@@ -50,7 +55,7 @@ typedef struct STidTags {
#pragma pack(pop)
typedef struct SJoinSupporter {
- SSqlObj* pObj; // parent SqlObj
+ int64_t pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
SInterval interval;
SLimitVal limit; // limit info
@@ -59,7 +64,7 @@ typedef struct SJoinSupporter {
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
- SSqlGroupbyExpr groupInfo; // group by info
+ SGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
@@ -85,36 +90,41 @@ typedef struct SMergeTsCtx {
int8_t compared;
}SMergeTsCtx;
-
typedef struct SVgroupTableInfo {
SVgroupInfo vgInfo;
- SArray* itemList; //SArray
+ SArray *itemList; // SArray
} SVgroupTableInfo;
-static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
- assert(pCmd != NULL && subClauseIndex >= 0);
+typedef struct SBlockKeyTuple {
+ TSKEY skey;
+ void* payloadAddr;
+} SBlockKeyTuple;
- if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) {
- return NULL;
- }
+typedef struct SBlockKeyInfo {
+ int32_t maxBytesAlloc;
+ SBlockKeyTuple* pKeyTuple;
+} SBlockKeyInfo;
- return pCmd->pQueryInfo[subClauseIndex];
-}
+int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
-void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
+void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
+int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
+int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo);
+void doRetrieveSubqueryData(SSchedMsg *pMsg);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
+void* tscDestroyUdfArrayList(SArray* pUdfList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
-int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
+int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
@@ -127,26 +137,38 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
+bool tscIsIrateQuery(SQueryInfo* pQueryInfo);
+
+bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
+bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
-bool tscIsTopbotQuery(SQueryInfo* pQueryInfo);
-int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo);
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
+bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
+bool hasTagValOutput(SQueryInfo* pQueryInfo);
+bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
+bool isStabledev(SQueryInfo* pQueryInfo);
+bool isTsCompQuery(SQueryInfo* pQueryInfo);
+bool isBlockDistQuery(SQueryInfo* pQueryInfo);
+bool isSimpleAggregateRv(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
+bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo);
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
+bool tscHasColumnFilter(SQueryInfo* pQueryInfo);
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryTags(SQueryInfo* pQueryInfo);
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo);
-SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
- SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
+SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
+ SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType, int16_t colId);
-int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql);
+int32_t tscSetTableFullName(SName* pName, SStrToken* pzTableName, SSqlObj* pSql);
void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
@@ -165,36 +187,53 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
+void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArray* pExprList);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
+int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo);
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
-int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
+void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid);
+int32_t tscFieldInfoSetSize(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
int32_t tscGetResRowLength(SArray* pExprList);
-SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
-SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+ int16_t size, int16_t resColId, int16_t interSize, int32_t colType);
+
+void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
+
+SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
-SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
+SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
-size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
-void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
-SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
-int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
-void tscSqlExprInfoDestroy(SArray* pExprInfo);
+size_t tscNumOfExprs(SQueryInfo* pQueryInfo);
+SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index);
+int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
+int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy);
+void tscExprAssign(SExprInfo* dst, const SExprInfo* src);
+void tscExprDestroy(SArray* pExprInfo);
+
+int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, SExprInfo*** pExpr, int32_t* num);
+
+void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
SColumn* tscColumnClone(const SColumn* src);
-bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex);
-SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
-SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid);
+SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema);
void tscColumnListDestroy(SArray* pColList);
+void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
+void tscColumnListCopyAll(SArray* dst, const SArray* src);
+
+void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar);
void tscDequoteAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken);
@@ -214,11 +253,14 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);
bool tscShouldBeFreed(SSqlObj* pSql);
-STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex);
+STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t tableIndex);
STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
-SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
-SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
+void tscInitQueryInfo(SQueryInfo* pQueryInfo);
+void tscClearSubqueryInfo(SSqlCmd* pCmd);
+int32_t tscAddQueryInfo(SSqlCmd *pCmd);
+SQueryInfo *tscGetQueryInfo(SSqlCmd* pCmd);
+SQueryInfo *tscGetQueryInfoS(SSqlCmd *pCmd);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
@@ -226,22 +268,20 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
-int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
-void tscInitQueryInfo(SQueryInfo* pQueryInfo);
-
-void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
-int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex);
+int tscGetSTableVgroupInfo(SSqlObj* pSql, SQueryInfo* pQueryInfo);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
-int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists);
+int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal);
+int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo);
void tscResetForNextRetrieve(SSqlRes* pRes);
-void tscDoQuery(SSqlObj* pSql);
+void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
+void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
@@ -266,16 +306,17 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
+void tscInitResForMerge(SSqlRes* pRes);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t fp, void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
-void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
+void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlCmd* pCmd);
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
-void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
+void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);
@@ -283,9 +324,13 @@ bool hasMoreClauseToTry(SSqlObj* pSql);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
-void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corEpSet);
+int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, SArray* pUdfList, __async_cb_func_t fp, bool metaClone);
+
+int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray);
+
+bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx);
bool tscSetSqlOwner(SSqlObj* pSql);
void tscClearSqlOwner(SSqlObj* pSql);
@@ -298,15 +343,26 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
+SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
+int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo);
int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
+int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
+void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);
char* strdup_throw(const char* str);
+bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
+SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
+
+void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id);
+
+char* cloneCurrentDBName(SSqlObj* pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tschemautil.h b/src/client/inc/tschemautil.h
deleted file mode 100644
index 0026a27e199289fa06dbcd8f10a2313bc61430ea..0000000000000000000000000000000000000000
--- a/src/client/inc/tschemautil.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef TDENGINE_TSCHEMAUTIL_H
-#define TDENGINE_TSCHEMAUTIL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "taosmsg.h"
-#include "tsclient.h"
-#include "ttoken.h"
-
-/**
- * get the number of tags of this table
- * @param pTableMeta
- * @return
- */
-int32_t tscGetNumOfTags(const STableMeta* pTableMeta);
-
-/**
- * get the number of columns of this table
- * @param pTableMeta
- * @return
- */
-int32_t tscGetNumOfColumns(const STableMeta* pTableMeta);
-
-/**
- * get the basic info of this table
- * @param pTableMeta
- * @return
- */
-STableComInfo tscGetTableInfo(const STableMeta* pTableMeta);
-
-/**
- * get the schema
- * @param pTableMeta
- * @return
- */
-SSchema* tscGetTableSchema(const STableMeta* pTableMeta);
-
-/**
- * get the tag schema
- * @param pMeta
- * @return
- */
-SSchema *tscGetTableTagSchema(const STableMeta *pMeta);
-
-/**
- * get the column schema according to the column index
- * @param pMeta
- * @param colIndex
- * @return
- */
-SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
-
-/**
- * get the column schema according to the column id
- * @param pTableMeta
- * @param colId
- * @return
- */
-SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
-
-/**
- * create the table meta from the msg
- * @param pTableMetaMsg
- * @param size size of the table meta
- * @return
- */
-STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
-
-bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
-SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // TDENGINE_TSCHEMAUTIL_H
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 6e273df38fb9a3889b4d499d2bf2165d16d20f59..b9aaea8469795771854919a2584d28d5c3f4e9e3 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -35,27 +35,19 @@ extern "C" {
#include "qExecutor.h"
#include "qSqlparser.h"
#include "qTsbuf.h"
+#include "qUtil.h"
#include "tcmdtype.h"
+typedef enum {
+ TAOS_REQ_FROM_SHELL,
+ TAOS_REQ_FROM_HTTP
+} SReqOrigin;
+
// forward declaration
struct SSqlInfo;
-struct SLocalMerger;
-
-// data source from sql string or from file
-enum {
- DATA_FROM_SQL_STRING = 1,
- DATA_FROM_DATA_FILE = 2,
-};
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows);
-typedef struct STableComInfo {
- uint8_t numOfTags;
- uint8_t precision;
- int16_t numOfColumns;
- int32_t rowSize;
-} STableComInfo;
-
typedef struct SNewVgroupInfo {
int32_t vgId;
int8_t inUse;
@@ -71,137 +63,150 @@ typedef struct CChildTableMeta {
uint64_t suid; // super table id
} CChildTableMeta;
-typedef struct STableMeta {
- int32_t vgId;
- STableId id;
- uint8_t tableType;
- char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
- uint64_t suid; // super table id
- int16_t sversion;
- int16_t tversion;
- STableComInfo tableInfo;
- SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
-} STableMeta;
-
-typedef struct STableMetaInfo {
- STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
- uint32_t tableMetaSize;
- SVgroupsInfo *vgroupList;
- SArray *pVgroupTables; // SArray
-
- /*
- * 1. keep the vgroup index during the multi-vnode super table projection query
- * 2. keep the vgroup index for multi-vnode insertion
- */
- int32_t vgroupIndex;
- SName name;
- char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
- SArray *tagColList; // SArray, involved tag columns
-} STableMetaInfo;
-
-
typedef struct SColumnIndex {
int16_t tableIndex;
int16_t columnIndex;
} SColumnIndex;
-
-typedef struct SFieldInfo {
- int16_t numOfOutput; // number of column in result
- TAOS_FIELD* final;
- SArray *internalField; // SArray
-} SFieldInfo;
-
typedef struct SColumn {
- SColumnIndex colIndex;
- int32_t numOfFilters;
- SColumnFilterInfo *filterInfo;
+ uint64_t tableUid;
+ int32_t columnIndex;
+ SColumnInfo info;
} SColumn;
-/* the structure for sql function in select clause */
-typedef struct SSqlExpr {
- char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
- SColIndex colInfo;
- uint64_t uid; // refactor use the pointer
- int16_t functionId; // function id in aAgg array
- int16_t resType; // return value type
- int16_t resBytes; // length of return value
- int32_t interBytes; // inter result buffer size
- int16_t numOfParams; // argument value of each function
- tVariant param[3]; // parameters are not more than 3
- int32_t offset; // sub result column value of arithmetic expression.
- int16_t resColId; // result column id
- SColumn *pFilter; // expr filter
-} SSqlExpr;
-
-typedef struct SExprFilter {
- tSqlExpr *pExpr; //used for having parse
- SSqlExpr *pSqlExpr;
- SArray *fp;
- SColumn *pFilters; //having filter info
-}SExprFilter;
-
typedef struct SInternalField {
TAOS_FIELD field;
bool visible;
- SExprInfo *pArithExprInfo;
- SSqlExpr *pSqlExpr;
- SExprFilter *pFieldFilters;
+ SExprInfo *pExpr;
} SInternalField;
-typedef struct SCond {
- uint64_t uid;
- int32_t len; // length of tag query condition data
- char * cond;
-} SCond;
-
-typedef struct SJoinNode {
- uint64_t uid;
- int16_t tagColId;
- SArray* tsJoin;
- SArray* tagJoin;
-} SJoinNode;
-
-typedef struct SJoinInfo {
- bool hasJoin;
- SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM];
-} SJoinInfo;
-
-typedef struct STagCond {
- // relation between tbname list and query condition, including : TK_AND or TK_OR
- int16_t relType;
-
- // tbname query condition, only support tbname query condition on one table
- SCond tbnameCond;
-
- // join condition, only support two tables join currently
- SJoinInfo joinInfo;
-
- // for different table, the query condition must be seperated
- SArray *pCond;
-} STagCond;
-
typedef struct SParamInfo {
int32_t idx;
- char type;
+ uint8_t type;
uint8_t timePrec;
int16_t bytes;
uint32_t offset;
} SParamInfo;
-
typedef struct SBoundColumn {
- bool hasVal; // denote if current column has bound or not
- int32_t offset; // all column offset value
+ int32_t offset; // all column offset value
+  int32_t toffset;  // first part offset for SDataRow TODO: get offset from STSchema in the future
+  uint8_t valStat;  // denotes whether the current column is bound (0: has a value, 1: no value)
} SBoundColumn;
+typedef enum {
+ VAL_STAT_HAS = 0x0, // 0 means has val
+ VAL_STAT_NONE = 0x01, // 1 means no val
+} EValStat;
+typedef struct {
+ uint16_t schemaColIdx;
+ uint16_t boundIdx;
+ uint16_t finalIdx;
+} SBoundIdxInfo;
+
+typedef enum _COL_ORDER_STATUS {
+ ORDER_STATUS_UNKNOWN = 0,
+ ORDER_STATUS_ORDERED = 1,
+ ORDER_STATUS_DISORDERED = 2,
+} EOrderStatus;
typedef struct SParsedDataColInfo {
- int16_t numOfCols;
- int16_t numOfBound;
- int32_t *boundedColumns;
- SBoundColumn *cols;
+ int16_t numOfCols;
+ int16_t numOfBound;
+ uint16_t flen; // TODO: get from STSchema
+ uint16_t allNullLen; // TODO: get from STSchema
+ uint16_t extendedVarLen;
+ int32_t * boundedColumns; // bound column idx according to schema
+ SBoundColumn * cols;
+ SBoundIdxInfo *colIdxInfo;
+  int8_t orderStatus;  // ordering status of the bound columns, see EOrderStatus
} SParsedDataColInfo;
+#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED)
+
+typedef struct {
+ int32_t dataLen; // len of SDataRow
+ int32_t kvLen; // len of SKVRow
+} SMemRowInfo;
+typedef struct {
+ uint8_t memRowType; // default is 0, that is SDataRow
+ uint8_t compareStat; // 0 no need, 1 need compare
+ TDRowTLenT kvRowInitLen;
+ SMemRowInfo *rowInfo;
+} SMemRowBuilder;
+
+typedef enum {
+ ROW_COMPARE_NO_NEED = 0,
+ ROW_COMPARE_NEED = 1,
+} ERowCompareStat;
+
+int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
+
+int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
+ int32_t allNullLen);
+void destroyMemRowBuilder(SMemRowBuilder *pBuilder);
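/*
 * Illustrative sketch of the SMemRowBuilder lifecycle around row parsing; the
 * argument values and the TSDB_CODE_SUCCESS (0) return convention are
 * assumptions for the example, not part of the patch.
 */
SMemRowBuilder builder = {0};
if (initMemRowBuilder(&builder, nRows, nCols, nBoundCols, allNullLen) == TSDB_CODE_SUCCESS) {
  /* parse each row, appending column values while tracking dataLen/kvLen */
  destroyMemRowBuilder(&builder);
}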
+
+/**
+ * @brief Get the write offset and column id for appending the idx-th bound column.
+ *
+ * @param memRowType the target memory row type (SDataRow or SKVRow)
+ * @param spd the parsed bound-column info
+ * @param idx the absolute bound index of columns
+ */
+static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd,
+ int32_t idx, int32_t *toffset, int16_t *colId) {
+ int32_t schemaIdx = 0;
+ if (IS_DATA_COL_ORDERED(spd)) {
+ schemaIdx = spd->boundedColumns[idx];
+ if (isDataRowT(memRowType)) {
+ *toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart
+ } else {
+ *toffset = idx * sizeof(SColIdx); // the offset of SColIdx
+ }
+ } else {
+ ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx);
+ schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx;
+ if (isDataRowT(memRowType)) {
+ *toffset = (spd->cols + schemaIdx)->toffset;
+ } else {
+ *toffset = ((spd->colIdxInfo + idx)->finalIdx) * sizeof(SColIdx);
+ }
+ }
+ *colId = pSchema[schemaIdx].colId;
+}
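/*
 * Illustrative pairing of the lookup helper above with the append helper
 * below, for the idx-th bound column of one row; value, colType, dataLen,
 * kvLen, and compareStat are assumed to come from the enclosing parse loop.
 */
int32_t toffset = 0;
int16_t colId = 0;
tscGetMemRowAppendInfo(pSchema, memRowType, spd, idx, &toffset, &colId);
tscAppendMemRowColValEx(row, value, true, colId, colType, toffset, &dataLen, &kvLen, compareStat);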
+
+/**
+ * @brief Append one column value to a memory row; applicable when consuming multiple columns per call site.
+ *
+ * @param row
+ * @param value
+ * @param isCopyVarData in some scenarios the variable-length value has already been copied into the row before calling tdAppend***ColVal()
+ * @param colId
+ * @param colType
+ * @param toffset write offset inside the row
+ * @param pBuilder
+ * @param rowNum index of the row in the builder's rowInfo array
+ */
+static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
+ int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder,
+ int32_t rowNum) {
+ tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
+ if (pBuilder->compareStat == ROW_COMPARE_NEED) {
+ SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum;
+ tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen);
+ }
+}
+
+// Applicable when consuming one row at a time
+static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
+ int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen,
+ uint8_t compareStat) {
+ tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset);
+ if (compareStat == ROW_COMPARE_NEED) {
+ tdGetColAppendDeltaLen(value, colType, dataLen, kvLen);
+ }
+}
typedef struct STableDataBlocks {
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
@@ -215,90 +220,70 @@ typedef struct STableDataBlocks {
uint32_t size;
STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache
char *pData;
-
- SParsedDataColInfo boundColumnInfo;
+ bool cloned;
+
+ SParsedDataColInfo boundColumnInfo;
// for parameter ('?') binding
- uint32_t numOfAllocedParams;
- uint32_t numOfParams;
- SParamInfo *params;
+ uint32_t numOfAllocedParams;
+ uint32_t numOfParams;
+ SParamInfo * params;
+ SMemRowBuilder rowBuilder;
} STableDataBlocks;
-typedef struct SQueryInfo {
- int16_t command; // the command may be different for each subclause, so keep it seperately.
- uint32_t type; // query/insert type
- STimeWindow window; // the whole query time window
-
- SInterval interval; // tumble time window
- SSessionWindow sessionWindow; // session time window
-
- SSqlGroupbyExpr groupbyExpr; // group by tags info
- SArray * colList; // SArray
- SFieldInfo fieldsInfo;
- SArray * exprList; // SArray
- SLimitVal limit;
- SLimitVal slimit;
- STagCond tagCond;
- SOrderVal order;
- int16_t fillType; // final result fill type
- int16_t numOfTables;
- STableMetaInfo **pTableMetaInfo;
- struct STSBuf *tsBuf;
- int64_t * fillVal; // default value for fill
- char * msg; // pointer to the pCmd->payload to keep error message temporarily
- int64_t clauseLimit; // limit for current sub clause
-
- int64_t prjOffset; // offset value in the original sql expression, only applied at client side
- int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit
-
- int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
- int16_t resColumnId; // result column id
- bool distinctTag; // distinct tag or not
- int32_t round; // 0/1/....
- int32_t bufLen;
- char* buf;
- int32_t havingFieldNum;
-} SQueryInfo;
+typedef struct {
+ STableMeta *pTableMeta;
+ SArray *vgroupIdList;
+// SVgroupsInfo *pVgroupsInfo;
+} STableMetaVgroupInfo;
+
+typedef struct SInsertStatementParam {
+  SName **pTableNameList;  // names of all tables involved in the current insert sql statement.
+ int32_t numOfTables; // number of tables in table name list
+ SHashObj *pTableBlockHashList; // data block for each table
+ SArray *pDataBlocks; // SArray. Merged submit block for each vgroup
+ int8_t schemaAttached; // denote if submit block is built with table schema or not
+ uint8_t payloadType; // EPayloadType. 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
+ STagData tagData; // NOTE: pTagData->data is used as a variant length array
+
+ int32_t batchSize; // for parameter ('?') binding and batch processing
+ int32_t numOfParams;
+
+ char msg[512]; // error message
+  uint32_t insertType;  // insert data from [file | sql statement | bound statement]
+ uint64_t objectId; // sql object id
+ char *sql; // current sql statement position
+} SInsertStatementParam;
+
+typedef enum {
+ PAYLOAD_TYPE_KV = 0,
+ PAYLOAD_TYPE_RAW = 1,
+} EPayloadType;
+
+#define IS_RAW_PAYLOAD(t) \
+ (((int)(t)) == PAYLOAD_TYPE_RAW) // 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
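/* Illustrative branch on the payload type carried by SInsertStatementParam. */
if (IS_RAW_PAYLOAD(pInsertParam->payloadType)) {
  /* raw payload: rows produced by the prepare ('?' binding) path */
} else {
  /* K-V payload: rows parsed from a plain insert statement */
}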
+// TODO extract sql parser supporter
typedef struct {
int command;
uint8_t msgType;
+ SInsertStatementParam insertParam;
char reserve1[3]; // fix bus error on arm32
- bool autoCreated; // create table if it is not existed during retrieve table meta in mnode
+ int32_t count; // todo remove it
+ bool subCmd;
- union {
- int32_t count;
- int32_t numOfTablesInSubmit;
- };
-
- uint32_t insertType; // TODO remove it
- int32_t clauseIndex; // index of multiple subclause query
-
- char * curSql; // current sql, resume position of sql after parsing paused
- int8_t parseFinished;
char reserve2[3]; // fix bus error on arm32
-
int16_t numOfCols;
char reserve3[2]; // fix bus error on arm32
uint32_t allocSize;
char * payload;
int32_t payloadLen;
- SQueryInfo **pQueryInfo;
- int32_t numOfClause;
- int32_t batchSize; // for parameter ('?') binding and batch processing
- int32_t numOfParams;
- int8_t dataSourceType; // load data from file or not
- char reserve4[3]; // fix bus error on arm32
- int8_t submitSchema; // submit block is built with table schema
- char reserve5[3]; // fix bus error on arm32
- STagData tagData; // NOTE: pTagData->data is used as a variant length array
-
- SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
- int32_t numOfTables;
-
- SHashObj *pTableBlockHashList; // data block for each table
- SArray *pDataBlocks; // SArray. Merged submit block for each vgroup
+ SHashObj *pTableMetaMap; // local buffer to keep the queried table meta, before validating the AST
+ SQueryInfo *pQueryInfo;
+ SQueryInfo *active; // current active query info
+ int32_t batchSize; // for parameter ('?') binding and batch processing
+ int32_t resColumnId;
} SSqlCmd;
typedef struct SResRec {
@@ -327,13 +312,14 @@ typedef struct {
char * data;
TAOS_ROW tsrow;
TAOS_ROW urow;
+ bool dataConverted;
int32_t* length; // length for each field for current row
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex* pColumnIndex;
TAOS_FIELD* final;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
- struct SLocalMerger *pLocalMerger;
+ struct SGlobalMerger *pMerger;
} SSqlRes;
typedef struct {
@@ -360,6 +346,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj
+ SReqOrigin from;
} STscObj;
typedef struct SSubqueryState {
@@ -382,6 +369,7 @@ typedef struct SSqlObj {
void * pStream;
void * pSubscription;
char * sqlstr;
+ void * pBuf; // table meta buffer
char parseRetry;
char retry;
char maxRetry;
@@ -390,9 +378,11 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
-
+ bool isBind;
+
SSubqueryState subState;
struct SSqlObj **pSubs;
+ struct SSqlObj *rootObj;
int64_t metaRid;
int64_t svgroupRid;
@@ -443,7 +433,7 @@ void tscInitMsgsFp();
int tsParseSql(SSqlObj *pSql, bool initial);
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
-int tscProcessSql(SSqlObj *pSql);
+int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo);
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex);
void tscAsyncResultOnError(SSqlObj *pSql);
@@ -457,8 +447,11 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
-void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
-void destroyTableNameList(SSqlCmd* pCmd);
+void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted);
+void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar);
+
+void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
+void destroyTableNameList(SInsertStatementParam* pInsertParam);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
@@ -468,6 +461,8 @@ void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
*/
void tscFreeSqlResult(SSqlObj *pSql);
+void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap);
+
/**
* free sql object, release allocated resource
* @param pObj
@@ -490,7 +485,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscImportDataFromFile(SSqlObj *pSql);
-void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
+struct SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id);
bool tscIsUpdateQuery(SSqlObj* pSql);
char* tscGetSqlStr(SSqlObj* pSql);
bool tscIsQueryWithLimit(SSqlObj* pSql);
@@ -499,68 +494,425 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql);
-int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
+int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
-int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
+int32_t tscValidateSqlInfo(SSqlObj *pSql, struct SSqlInfo *pInfo);
+
+int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
+extern int32_t sentinel;
+extern SHashObj *tscVgroupMap;
+extern SHashObj *tscTableMetaMap;
+extern SCacheObj *tscVgroupListBuf;
+
+extern int tscObjRef;
+extern void *tscTmr;
+extern void *tscQhandle;
+extern int tscKeepConn[];
+extern int tscRefId;
+extern int tscNumOfObj;     // number of existing sqlObj in the current process.
-static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
- SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);
+extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
+
+void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
+int16_t getNewResColId(SSqlCmd* pCmd);
- int32_t type = pInfo->field.type;
- int32_t bytes = pInfo->field.bytes;
+int32_t schemaIdxCompar(const void *lhs, const void *rhs);
+int32_t boundIdxCompar(const void *lhs, const void *rhs);
+static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) {
+ ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize);
+ return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen;
+}
- char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
- UNUSED(pData);
+static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) {
+ if (isDataRow(row)) {
+ if (kvLen < (dataLen * KVRatioConvert)) {
+ memRowSetConvert(row);
+ }
+ } else if (kvLen > dataLen) {
+ memRowSetConvert(row);
+ }
+}
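/*
 * Worked example for the check above (numbers illustrative): for a wide table
 * where only a few columns are bound, one row may yield dataLen = 4096 but
 * kvLen = 96. Since 96 < 4096 * KVRatioConvert, memRowSetConvert() flags the
 * SDataRow so that it is later rewritten as the smaller SKVRow.
 */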
-// user defined constant value output columns
- if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
- if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
- pData = pInfo->pSqlExpr->param[1].pz;
- pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
- pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
- } else {
- assert(bytes == tDataTypes[type].bytes);
+static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) {
+ memRowSetType(row, memRowType);
+ if (isDataRowT(memRowType)) {
+ dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion);
+ dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen));
+ } else {
+ ASSERT(nBoundCols > 0);
+ memRowSetKvVersion(row, pBlock->pTableMeta->sversion);
+ kvRowSetNCols(memRowKvBody(row), nBoundCols);
+ kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
+ }
+}
+/**
+ * TODO: Move to tdataformat.h and refactor when STSchema is available.
+ * - fetch flen and toffset from STSchema and remove param spd
+ */
+static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols,
+ SParsedDataColInfo *spd) {
+ ASSERT(isKvRow(src));
+ SKVRow kvRow = memRowKvBody(src);
+ SDataRow dataRow = memRowDataBody(dest);
+
+ memRowSetType(dest, SMEM_ROW_DATA);
+ dataRowSetVersion(dataRow, memRowKvVersion(src));
+ dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen));
+
+ int32_t kvIdx = 0;
+ for (int i = 0; i < nCols; ++i) {
+ SSchema *schema = pSchema + i;
+ void * val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx);
+ tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type,
+ (spd->cols + i)->toffset);
+ }
+}
- pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64;
- pRes->length[columnIndex] = bytes;
+// TODO: Move to tdataformat.h and refactor when STSchema is available.
+static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols,
+ SParsedDataColInfo *spd) {
+ ASSERT(isDataRow(src));
+
+ SDataRow dataRow = memRowDataBody(src);
+ SKVRow kvRow = memRowKvBody(dest);
+
+ memRowSetType(dest, SMEM_ROW_KV);
+ memRowSetKvVersion(kvRow, dataRowVersion(dataRow));
+ kvRowSetNCols(kvRow, nBoundCols);
+ kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
+
+ int32_t toffset = 0, kvOffset = 0;
+ for (int i = 0; i < nCols; ++i) {
+ if ((spd->cols + i)->valStat == VAL_STAT_HAS) {
+ SSchema *schema = pSchema + i;
+ toffset = (spd->cols + i)->toffset;
+ void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE);
+ tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset);
+ kvOffset += sizeof(SColIdx);
}
+ }
+}
+
+// TODO: Move to tdataformat.h and refactor when STSchema is available.
+static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) {
+ STableMeta * pTableMeta = pBlock->pTableMeta;
+ STableComInfo tinfo = tscGetTableInfo(pTableMeta);
+ SSchema * pSchema = tscGetTableSchema(pTableMeta);
+ SParsedDataColInfo *spd = &pBlock->boundColumnInfo;
+
+ ASSERT(dest != src);
+
+ if (isDataRow(src)) {
+ // TODO: Can we use pBlock -> numOfParam directly?
+ ASSERT(spd->numOfBound > 0);
+ convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd);
} else {
- if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
- int32_t realLen = varDataLen(pData);
- assert(realLen <= bytes - VARSTR_HEADER_SIZE);
+ convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd);
+ }
+}
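/*
 * Illustrative use of the dispatcher above: a row flagged for conversion is
 * re-encoded into a caller-provided buffer before submit; destBuf and its
 * sizing are assumptions for the sketch.
 */
SMemRow dest = (SMemRow)destBuf;
convertSMemRow(dest, row, pBlock);  /* rewrites row in the opposite encoding */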
- pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : (unsigned char*)((tstr *)pData)->data;
- if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
- *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
- }
+static bool isNullStr(SStrToken *pToken) {
+ return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
+ (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
+}
- pRes->length[columnIndex] = realLen;
- } else {
- assert(bytes == tDataTypes[type].bytes);
+static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
+ errno = 0;
+ *value = strtold(pToken->z, endPtr);
- pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
- pRes->length[columnIndex] = bytes;
- }
+  // the token was not fully consumed as a number, return error
+ if ((*endPtr - pToken->z) != pToken->n) {
+ return TK_ILLEGAL;
}
+
+ return pToken->type;
}
-extern int32_t sentinel;
-extern SHashObj *tscVgroupMap;
-extern SHashObj *tscTableMetaInfo;
+static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
+static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
-extern int tscObjRef;
-extern void *tscTmr;
-extern void *tscQhandle;
-extern int tscKeepConn[];
-extern int tscRefId;
-extern int tscNumOfObj; // number of existed sqlObj in current process.
+static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str,
+ bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId,
+ int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) {
+ int64_t iv;
+ int32_t ret;
+ char * endptr = NULL;
-extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
+ if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
+ return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
+ }
-void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
-int16_t getNewResColId(SQueryInfo* pQueryInfo);
+ switch (pSchema->type) {
+ case TSDB_DATA_TYPE_BOOL: { // bool
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
+ if (strncmp(pToken->z, "true", pToken->n) == 0) {
+ tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ } else if (strncmp(pToken->z, "false", pToken->n) == 0) {
+ tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
+ }
+ } else if (pToken->type == TK_INTEGER) {
+ iv = strtoll(pToken->z, NULL, 10);
+ tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
+ dataLen, kvLen, compareStat);
+ } else if (pToken->type == TK_FLOAT) {
+ double dv = strtod(pToken->z, NULL);
+ tscAppendMemRowColValEx(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset,
+ dataLen, kvLen, compareStat);
+ } else {
+ return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
+ }
+ }
+ break;
+ }
+
+ case TSDB_DATA_TYPE_TINYINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
+ } else if (!IS_VALID_TINYINT(iv)) {
+ return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
+ }
+
+ uint8_t tmpVal = (uint8_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_UTINYINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
+ } else if (!IS_VALID_UTINYINT(iv)) {
+ return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
+ }
+
+ uint8_t tmpVal = (uint8_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
+ } else if (!IS_VALID_SMALLINT(iv)) {
+ return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
+ }
+
+ int16_t tmpVal = (int16_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
+ } else if (!IS_VALID_USMALLINT(iv)) {
+ return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
+ }
+
+ uint16_t tmpVal = (uint16_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
+ } else if (!IS_VALID_INT(iv)) {
+ return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
+ }
+
+ int32_t tmpVal = (int32_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
+ } else if (!IS_VALID_UINT(iv)) {
+ return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
+ }
+
+ uint32_t tmpVal = (uint32_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
+ } else if (!IS_VALID_BIGINT(iv)) {
+ return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
+ }
+
+ tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
+ } else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
+ return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
+ }
+
+ uint64_t tmpVal = (uint64_t)iv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ double dv;
+ if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
+ return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
+ }
+
+ if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) ||
+ isnan(dv)) {
+ return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
+ }
+
+ float tmpVal = (float)dv;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ if (isNullStr(pToken)) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+ double dv;
+ if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
+ return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
+ }
+
+ if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
+ return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
+ }
+
+ tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+      // binary data cannot be treated as a null-terminated char string, otherwise the last char of the string is lost
+ if (pToken->type == TK_NULL) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+    } else {  // overlong values are rejected as invalid sql, not truncated automatically
+ if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor
+ return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
+ }
+ // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
+ char *rowEnd = memRowEnd(row);
+ STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n);
+ tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ if (pToken->type == TK_NULL) {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ } else {
+        // if the converted output length exceeds pSchema->bytes, return error: 'Argument list too long'
+ int32_t output = 0;
+ char * rowEnd = memRowEnd(row);
+ if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE,
+ &output)) {
+ char buf[512] = {0};
+ snprintf(buf, tListLen(buf), "%s", strerror(errno));
+ return tscInvalidOperationMsg(msg, buf, pToken->z);
+ }
+ varDataSetLen(rowEnd, output);
+ tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ if (pToken->type == TK_NULL) {
+ if (primaryKey) {
+          // When building an SKVRow, the primary key column must be written even when its value is NULL.
+ int64_t tmpVal = 0;
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ } else {
+ tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen,
+ compareStat);
+ }
+ } else {
+ int64_t tmpVal;
+ if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
+ return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
+ }
+ tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat);
+ }
+
+ break;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
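/*
 * Illustrative call of the parser above for one token of an insert statement;
 * the loop state (token, sql, tinfo, schemaIdx, etc.) is assumed from the
 * surrounding context.
 */
int32_t code = tsParseOneColumnKV(pSchema + schemaIdx, &token, row, pInsertParam->msg, &sql,
                                  (schemaIdx == 0) /* column 0 is the primary timestamp */,
                                  tinfo.precision, toffset, colId, &dataLen, &kvLen, compareStat);
if (code != TSDB_CODE_SUCCESS) {
  return code;
}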
#ifdef __cplusplus
}
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index b3060e2c820d7bbb405bf6e96a4bf8af8ed0ec55..7181c658ddcdfde3efe7df3c0784c20f18bd4c03 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -51,10 +51,10 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
- * Method: getResultTimePrecision
- * Signature: (J)J
+ * Method: getResultTimePrecisionImp
+ * Signature: (JJ)I
*/
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp
(JNIEnv *, jobject, jlong, jlong);
/*
@@ -100,7 +100,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: isUpdateQueryImp
- * Signature: (J)J
+ * Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp
(JNIEnv *env, jobject jobj, jlong con, jlong tres);
@@ -185,6 +185,52 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp
(JNIEnv *, jobject, jlong, jbyteArray);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: prepareStmtImp
+ * Signature: ([BJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp
+ (JNIEnv *, jobject, jbyteArray, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setBindTableNameImp
+ * Signature: (JLjava/lang/String;J)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp
+ (JNIEnv *, jobject, jlong, jstring, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: bindColDataImp
+ * Signature: (J[B[B[BIIIIJ)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp
+(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray, jbyteArray, jint, jint, jint, jint, jlong);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: executeBatchImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: closeStmt
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con);
+
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setTableNameTagsImp
+ * Signature: (JLjava/lang/String;I[B[B[B[BJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp
+ (JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 7447e36ac9cf1074db0c62077be07b6a41a99256..506c8d64b9f4213713656ecd08612a103e0b1b2d 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -20,12 +20,42 @@
#include "com_taosdata_jdbc_TSDBJNIConnector.h"
-#define jniFatal(...) { if (jniDebugFlag & DEBUG_FATAL) { taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniError(...) { if (jniDebugFlag & DEBUG_ERROR) { taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniWarn(...) { if (jniDebugFlag & DEBUG_WARN) { taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniInfo(...) { if (jniDebugFlag & DEBUG_INFO) { taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }}
-#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }}
+#define jniFatal(...) \
+ { \
+ if (jniDebugFlag & DEBUG_FATAL) { \
+ taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniError(...) \
+ { \
+ if (jniDebugFlag & DEBUG_ERROR) { \
+ taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniWarn(...) \
+ { \
+ if (jniDebugFlag & DEBUG_WARN) { \
+ taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniInfo(...) \
+ { \
+ if (jniDebugFlag & DEBUG_INFO) { \
+ taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniDebug(...) \
+ { \
+ if (jniDebugFlag & DEBUG_DEBUG) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniTrace(...) \
+ { \
+ if (jniDebugFlag & DEBUG_TRACE) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
int __init = 0;
@@ -60,14 +90,14 @@ jmethodID g_blockdataSetByteArrayFp;
jmethodID g_blockdataSetNumOfRowsFp;
jmethodID g_blockdataSetNumOfColsFp;
-#define JNI_SUCCESS 0
-#define JNI_TDENGINE_ERROR -1
+#define JNI_SUCCESS 0
+#define JNI_TDENGINE_ERROR -1
#define JNI_CONNECTION_NULL -2
#define JNI_RESULT_SET_NULL -3
#define JNI_NUM_OF_FIELDS_0 -4
-#define JNI_SQL_NULL -5
-#define JNI_FETCH_END -6
-#define JNI_OUT_OF_MEMORY -7
+#define JNI_SQL_NULL -5
+#define JNI_FETCH_END -6
+#define JNI_OUT_OF_MEMORY -7
static void jniGetGlobalMethod(JNIEnv *env) {
// make sure init function executed once
@@ -113,7 +143,7 @@ static void jniGetGlobalMethod(JNIEnv *env) {
g_rowdataSetFloatFp = (*env)->GetMethodID(env, g_rowdataClass, "setFloat", "(IF)V");
g_rowdataSetDoubleFp = (*env)->GetMethodID(env, g_rowdataClass, "setDouble", "(ID)V");
g_rowdataSetStringFp = (*env)->GetMethodID(env, g_rowdataClass, "setString", "(ILjava/lang/String;)V");
- g_rowdataSetTimestampFp = (*env)->GetMethodID(env, g_rowdataClass, "setTimestamp", "(IJ)V");
+ g_rowdataSetTimestampFp = (*env)->GetMethodID(env, g_rowdataClass, "setTimestamp", "(IJI)V");
g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V");
(*env)->DeleteLocalRef(env, rowdataClass);
@@ -129,13 +159,13 @@ static void jniGetGlobalMethod(JNIEnv *env) {
}
static int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
- if ((TAOS*) conn == NULL) {
+ if ((TAOS *)conn == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
- if ((TAOS_RES *) res == NULL) {
- jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn);
+ if ((TAOS_RES *)res == NULL) {
+ jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS *)conn);
return JNI_RESULT_SET_NULL;
}
@@ -216,7 +246,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost,
jint jport, jstring jdbName, jstring juser,
jstring jpass) {
- jlong ret = 0;
+ jlong ret = 0;
const char *host = NULL;
const char *user = NULL;
const char *pass = NULL;
@@ -246,7 +276,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
jniDebug("jobj:%p, pass not specified, use default password", jobj);
}
- ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
+ ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
@@ -289,7 +319,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
jsize len = (*env)->GetArrayLength(env, jsql);
- char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ char *str = (char *)calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
@@ -315,16 +345,17 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
}
free(str);
- return (jlong) pSql;
+ return (jlong)pSql;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con,
+ jlong tres) {
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
- return (jint)taos_errno((TAOS_RES*) tres);
+ return (jint)taos_errno((TAOS_RES *)tres);
}
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) {
@@ -334,7 +365,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
@@ -359,7 +390,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(
SSqlObj *pSql = (TAOS_RES *)tres;
- return (tscIsUpdateQuery(pSql)? 1:0);
+ return (tscIsUpdateQuery(pSql) ? 1 : 0);
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con,
@@ -370,21 +401,22 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(
}
taos_free_result((void *)res);
- jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res);
+ jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS *)con, (void *)res);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
jint ret = taos_affected_rows((SSqlObj *)res);
- jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret);
+ jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res,
+ (int32_t)ret);
return ret;
}
@@ -392,13 +424,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp(JNIEnv *env, jobject jobj,
jlong con, jlong res,
jobject arrayListObj) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
- TAOS_RES* tres = (TAOS_RES*) res;
+ TAOS_RES * tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t num_fields = taos_num_fields(tres);
@@ -452,7 +484,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
int32_t numOfFields = taos_num_fields(result);
if (numOfFields == 0) {
- jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields);
+ jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void *)res, numOfFields);
return JNI_NUM_OF_FIELDS_0;
}
@@ -460,7 +492,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
if (row == NULL) {
int code = taos_errno(result);
if (code == TSDB_CODE_SUCCESS) {
- jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields);
+ jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void *)res,
+ numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon);
@@ -468,7 +501,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
}
- int32_t* length = taos_fetch_lengths(result);
+ int32_t *length = taos_fetch_lengths(result);
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
@@ -519,9 +552,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
jniFromNCharToByteArray(env, (char *)row[i], length[i]));
break;
}
- case TSDB_DATA_TYPE_TIMESTAMP:
- (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]));
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ int precision = taos_result_precision(result);
+ (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]), precision);
break;
+ }
default:
break;
}
@@ -531,7 +566,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con,
- jlong res, jobject rowobj) {
+ jlong res, jobject rowobj) {
TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
@@ -562,8 +597,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields);
for (int i = 0; i < numOfFields; i++) {
- (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows,
- jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes * numOfRows));
+ int bytes = fields[i].bytes;
+
+ if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_NCHAR) {
+ bytes += 2;
+ }
+ (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, bytes * numOfRows,
+ jniFromNCharToByteArray(env, (char *)row[i], bytes * numOfRows));
}
return JNI_SUCCESS;
@@ -583,7 +623,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con,
- jboolean restart, jstring jtopic, jstring jsql, jint jinterval) {
+ jboolean restart, jstring jtopic,
+ jstring jsql, jint jinterval) {
jlong sub = 0;
TAOS *taos = (TAOS *)con;
char *topic = NULL;
@@ -672,8 +713,16 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(J
return (*env)->NewStringUTF(env, (const char *)tsCharset);
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision(JNIEnv *env, jobject jobj, jlong con,
- jlong res) {
+/**
+ * Get Result Time Precision
+ * @param env the JNI environment
+ * @param jobj the TSDBJNIConnector java object
+ * @param con the c connection pointer
+ * @param res the TAOS_RES object, i.e. the SSqlObj
+ * @return precision 0: ms, 1: us, 2: ns
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj,
+ jlong con, jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
@@ -687,4 +736,294 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrec
}
return taos_result_precision(result);
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj,
+ jbyteArray jsql, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ if (jsql == NULL) {
+ jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ jsize len = (*env)->GetArrayLength(env, jsql);
+
+ char *str = (char *)calloc(1, sizeof(char) * (len + 1));
+ if (str == NULL) {
+ jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
+ return JNI_OUT_OF_MEMORY;
+ }
+
+ (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ TAOS_STMT *pStmt = taos_stmt_init(tscon);
+ int32_t code = taos_stmt_prepare(pStmt, str, len);
+ tfree(str);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ return (jlong)pStmt;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj,
+ jlong stmt, jstring jname,
+ jlong conn) {
+ TAOS *tsconn = (TAOS *)conn;
+ if (tsconn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
+ return JNI_SQL_NULL;
+ }
+
+ const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
+
+ int32_t code = taos_stmt_set_tbname((void *)stmt, name);
+ if (code != TSDB_CODE_SUCCESS) {
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, set stmt bind table name:%s", jobj, tsconn, name);
+ (*env)->ReleaseStringUTFChars(env, jname, name);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
+ JNIEnv *env, jobject jobj, jlong stmt, jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList,
+ jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ // todo refactor
+ jsize len = (*env)->GetArrayLength(env, colDataList);
+ char *colBuf = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, colDataList, 0, len, (jbyte *)colBuf);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ len = (*env)->GetArrayLength(env, lengthList);
+ char *lengthArray = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ len = (*env)->GetArrayLength(env, nullList);
+ char *nullArray = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray);
+ if ((*env)->ExceptionCheck(env)) {
+ }
+
+ // bind multi-rows with only one invoke.
+ TAOS_MULTI_BIND *b = calloc(1, sizeof(TAOS_MULTI_BIND));
+
+ b->num = numOfRows;
+ b->buffer_type = dataType; // todo check data type
+ b->buffer_length = IS_VAR_DATA_TYPE(dataType) ? dataBytes : tDataTypes[dataType].bytes;
+ b->is_null = nullArray;
+ b->buffer = colBuf;
+ b->length = (int32_t *)lengthArray;
+
+ // set the length and is_null array
+ if (!IS_VAR_DATA_TYPE(dataType)) {
+ int32_t bytes = tDataTypes[dataType].bytes;
+ for (int32_t i = 0; i < numOfRows; ++i) {
+ b->length[i] = bytes;
+ }
+ }
+
+ int32_t code = taos_stmt_bind_single_param_batch(pStmt, b, colIndex);
+ tfree(b->length);
+ tfree(b->buffer);
+ tfree(b->is_null);
+ tfree(b);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ return JNI_SUCCESS;
+}
+
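+// Submit the batch: taos_stmt_add_batch stages the rows bound so far and
+// taos_stmt_execute sends them to the server.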
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt,
+ jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ taos_stmt_add_batch(pStmt);
+ int32_t code = taos_stmt_execute(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, batch execute", jobj, tscon);
+ return JNI_SUCCESS;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt,
+ jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
+ return JNI_SQL_NULL;
+ }
+
+ int32_t code = taos_stmt_close(pStmt);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+
+ jniDebug("jobj:%p, conn:%p, stmt closed", jobj, tscon);
+ return JNI_SUCCESS;
+}
+
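+/*
+ * setTableNameTagsImp binds the target table name together with its tag
+ * values. The tag payload arrives as flat arrays: `tags` concatenates the tag
+ * values back to back, while typeList, lengthList and nullList carry one
+ * entry per tag (int8 type, int64 length and int32 null flag, as read below).
+ */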
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(
+ JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList,
+ jbyteArray lengthList, jbyteArray nullList, jlong conn) {
+ TAOS *tsconn = (TAOS *)conn;
+ if (tsconn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
+ if (pStmt == NULL) {
+ jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
+ return JNI_SQL_NULL;
+ }
+
+ jsize len = (*env)->GetArrayLength(env, tags);
+ char *tagsData = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, tags, 0, len, (jbyte *)tagsData);
+ if ((*env)->ExceptionCheck(env)) {
+ // todo handle error
+ }
+
+ len = (*env)->GetArrayLength(env, lengthList);
+ int64_t *lengthArray = (int64_t *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray);
+  if ((*env)->ExceptionCheck(env)) {
+    // todo handle error
+  }
+
+ len = (*env)->GetArrayLength(env, typeList);
+ char *typeArray = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray);
+  if ((*env)->ExceptionCheck(env)) {
+    // todo handle error
+  }
+
+ len = (*env)->GetArrayLength(env, nullList);
+ int32_t *nullArray = (int32_t *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray);
+  if ((*env)->ExceptionCheck(env)) {
+    // todo handle error
+  }
+
+ const char *name = (*env)->GetStringUTFChars(env, tableName, NULL);
+ char * curTags = tagsData;
+
+ TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND));
+ for (int32_t i = 0; i < numOfTags; ++i) {
+ tagsBind[i].buffer_type = typeArray[i];
+ tagsBind[i].buffer = curTags;
+ tagsBind[i].is_null = &nullArray[i];
+ tagsBind[i].length = (uintptr_t *)&lengthArray[i];
+
+ curTags += lengthArray[i];
+ }
+
+ int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind);
+
+ int32_t nTags = (int32_t)numOfTags;
+ jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags);
+
+ tfree(tagsData);
+ tfree(lengthArray);
+ tfree(typeArray);
+ tfree(nullArray);
+ tfree(tagsBind);
+ (*env)->ReleaseStringUTFChars(env, tableName, name);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+ return JNI_TDENGINE_ERROR;
+ }
+ return JNI_SUCCESS;
+}
+
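+/*
+ * insertLinesImp ingests schemaless line-protocol records: every Java string
+ * is converted to a UTF-8 C string and the whole batch is handed to
+ * taos_insert_lines() in a single call.
+ */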
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
+ jobjectArray lines, jlong conn) {
+ TAOS *taos = (TAOS *)conn;
+ if (taos == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+
+ int numLines = (*env)->GetArrayLength(env, lines);
+ char **c_lines = calloc(numLines, sizeof(char *));
+ if (c_lines == NULL) {
+ jniError("c_lines:%p, alloc memory failed", c_lines);
+ return JNI_OUT_OF_MEMORY;
+ }
+ for (int i = 0; i < numLines; ++i) {
+ jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i));
+ c_lines[i] = (char *)(*env)->GetStringUTFChars(env, line, 0);
+ }
+
+ int code = taos_insert_lines(taos, c_lines, numLines);
+
+ for (int i = 0; i < numLines; ++i) {
+ jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i));
+ (*env)->ReleaseStringUTFChars(env, line, c_lines[i]);
+ }
+
+ tfree(c_lines);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s", jobj, taos, tstrerror(code));
+
+ return JNI_TDENGINE_ERROR;
+ }
+ return code;
}
\ No newline at end of file
diff --git a/src/client/src/taos.def b/src/client/src/taos.def
index 43cd8190614facc207bbad0885360f23c8216c8a..7d3b8e80c20226c4a509c95ab5728f41852110f5 100644
--- a/src/client/src/taos.def
+++ b/src/client/src/taos.def
@@ -7,11 +7,16 @@ taos_connect_auth
taos_close
taos_stmt_init
taos_stmt_prepare
+taos_stmt_set_tbname_tags
+taos_stmt_set_tbname
+taos_stmt_is_insert
+taos_stmt_num_params
taos_stmt_bind_param
taos_stmt_add_batch
taos_stmt_execute
taos_stmt_use_result
taos_stmt_close
+taos_stmt_errstr
taos_query
taos_fetch_row
taos_result_precision
@@ -37,6 +42,4 @@ taos_consume
taos_unsubscribe
taos_open_stream
taos_close_stream
-taos_fetch_block
taos_load_table_info
-
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 3f2a54fbdc144df7c4d5a32496ec87af0c632a5e..1041034011001e590c373e0bae174e251b3ea234 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -22,7 +22,7 @@
#include "tscSubquery.h"
#include "tscUtil.h"
#include "tsched.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
#include "tsclient.h"
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
@@ -44,6 +44,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = fp;
pSql->fetchFp = fp;
+ pSql->rootObj = pSql;
registerSqlObj(pSql);
@@ -58,18 +59,25 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
- pCmd->curSql = pSql->sqlstr;
+ pCmd->resColumnId = TSDB_RES_COL_ID;
+ taosAcquireRef(tscObjRef, pSql->self);
int32_t code = tsParseSql(pSql, true);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ taosReleaseRef(tscObjRef, pSql->self);
+ return;
+ }
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscAsyncResultOnError(pSql);
+ taosReleaseRef(tscObjRef, pSql->self);
return;
}
- tscDoQuery(pSql);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ executeQuery(pSql, pQueryInfo);
+ taosReleaseRef(tscObjRef, pSql->self);
}
// TODO return the correct error code to client in tscQueueAsyncError
@@ -126,7 +134,8 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
* all available virtual node has been checked already, now we need to check
* for the next subclause queries
*/
- if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
+ if (pCmd->active->sibling != NULL) {
+ pCmd->active = pCmd->active->sibling;
tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
return;
}
@@ -142,7 +151,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
}
// local merge has handle this situation during super table non-projection query.
- if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) {
+ if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE) {
pRes->numOfClauseTotal += pRes->numOfRows;
}
@@ -166,20 +175,23 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
} else {
pRes->code = numOfRows;
}
+ if (pRes->code == TSDB_CODE_SUCCESS) {
+ pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
+ }
tscAsyncResultOnError(pSql);
return;
}
pSql->fp = fp;
- if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
+ if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockForSubquery(pSql);
} else {
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
}
}
@@ -193,8 +205,8 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
}
-void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
- SSqlObj *pSql = (SSqlObj *)taosa;
+void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
+ SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL || pSql->signature != pSql) {
tscError("sql object is NULL");
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
@@ -206,21 +218,30 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
// user-defined callback function is stored in fetchFp
pSql->fetchFp = fp;
- pSql->fp = tscAsyncFetchRowsProxy;
+ pSql->fp = tscAsyncFetchRowsProxy;
+ pSql->param = param;
+
+ tscResetForNextRetrieve(pRes);
+
+  // handle the outer query based on the already retrieved nested query results.
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ SSchedMsg schedMsg = {0};
+ schedMsg.fp = doRetrieveSubqueryData;
+ schedMsg.ahandle = (void *)pSql;
+ schedMsg.thandle = (void *)1;
+ schedMsg.msg = 0;
+ taosScheduleTask(tscQhandle, &schedMsg);
+ return;
+ }
if (pRes->qId == 0) {
- tscError("qhandle is NULL");
+ tscError("qhandle is invalid");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
- pSql->param = param;
-
tscAsyncResultOnError(pSql);
return;
}
- pSql->param = param;
- tscResetForNextRetrieve(pRes);
-
- // handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockForSubquery(pSql);
} else if (pRes->completed) {
@@ -232,7 +253,8 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
* all available virtual nodes in current clause has been checked already, now try the
* next one in the following union subclause
*/
- if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
+ if (pCmd->active->sibling != NULL) {
+ pCmd->active = pCmd->active->sibling; // todo refactor
tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
return;
}
@@ -245,18 +267,19 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
}
return;
- } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) {
+ } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE) {
// in case of show command, return no data
(*pSql->fetchFp)(param, pSql, 0);
} else {
assert(0);
}
} else { // current query is not completed, continue retrieve from node
- if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
+ if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
-
- tscProcessSql(pSql);
+
+ SQueryInfo* pQueryInfo1 = tscGetQueryInfo(&pSql->cmd);
+ tscBuildAndSendRequest(pSql, pQueryInfo1);
}
}
@@ -312,49 +335,6 @@ void tscAsyncResultOnError(SSqlObj* pSql) {
int tscSendMsgToServer(SSqlObj *pSql);
-static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) {
- // handle the invalid table error code for super table.
- // update the pExpr info, colList info, number of table columns
- // TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case.
- if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) {
- int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
- int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
-
- SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
- for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
- pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
-
- if (pExpr->colInfo.colIndex >= 0) {
- int32_t index = pExpr->colInfo.colIndex;
-
- if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) ||
- (TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < numOfCols || index >= (numOfCols + numOfTags)))) {
- return pSql->retryReason;
- }
-
- if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
- strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) {
- return pSql->retryReason;
- }
- }
- }
-
- // validate the table columns information
- for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
- SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
- if (pCol->colIndex.columnIndex >= numOfCols) {
- return pSql->retryReason;
- }
- }
- } else {
- // do nothing
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
if (pSql == NULL) return;
@@ -366,7 +346,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
pRes->code = code;
SSqlObj *sub = (SSqlObj*) res;
- const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
+ const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"multi-tableMeta";
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
goto _error;
@@ -374,136 +354,62 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscDebug("0x%"PRIx64" get %s successfully", pSql->self, msg);
if (pSql->pStream == NULL) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- // check if it is a sub-query of super table query first, if true, enter another routine
- if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
- tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send the corresponding query", pSql->self);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- code = tscGetTableMeta(pSql, pTableMetaInfo);
- assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
+ if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
+ tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);
+ code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
- }
-
- assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
- code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo);
- if (code != TSDB_CODE_SUCCESS) {
+ } else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- // tscProcessSql can add error into async res
- tscProcessSql(pSql);
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else { // continue to process normal async query
- if (pCmd->parseFinished) {
- tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send corresponding query", pSql->self);
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- code = tscGetTableMeta(pSql, pTableMetaInfo);
-
- assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- }
-
- assert(pCmd->command != TSDB_SQL_INSERT);
-
- if (pCmd->command == TSDB_SQL_SELECT) {
- tscDebug("0x%"PRIx64" redo parse sql string and proceed", pSql->self);
- pCmd->parseFinished = false;
- tscResetSqlCmd(pCmd, true);
-
- code = tsParseSql(pSql, true);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- tscProcessSql(pSql);
- } else { // in all other cases, simple retry
- tscProcessSql(pSql);
- }
-
- taosReleaseRef(tscObjRef, pSql->self);
- return;
+ if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert
+ (*pSql->fp)(pSql->param, pSql, code);
+ } else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert
+ tscImportDataFromFile(pSql);
+ } else { // sql string insert
+ tscHandleMultivnodeInsert(pSql);
+ }
+ } else {
+ if (pSql->retryReason != TSDB_CODE_SUCCESS) {
+ tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self);
+ tscResetSqlCmd(pCmd, false);
+ pSql->retryReason = TSDB_CODE_SUCCESS;
} else {
- tscDebug("0x%"PRIx64" continue parse sql after get table meta", pSql->self);
-
- code = tsParseSql(pSql, false);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) {
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- code = tscGetTableMeta(pSql, pTableMetaInfo);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else {
- assert(code == TSDB_CODE_SUCCESS);
- }
-
- (*pSql->fp)(pSql->param, pSql, code);
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- }
-
- // proceed to invoke the tscDoQuery();
+ tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);
}
- }
- } else { // stream computing
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
-
- code = tscGetTableMeta(pSql, pTableMetaInfo);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex);
+ code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, pSql->self);
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
+
+ SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd);
+ executeQuery(pSql, pQueryInfo1);
}
- tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pSql->cmd.command);
- if (!pSql->cmd.parseFinished) {
+ taosReleaseRef(tscObjRef, pSql->self);
+ return;
+ } else { // stream computing
+ tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pCmd->command);
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ if (tscNumOfExprs(pQueryInfo) == 0) {
tsParseSql(pSql, false);
}
(*pSql->fp)(pSql->param, pSql, code);
-
taosReleaseRef(tscObjRef, pSql->self);
-
return;
}
- tscDoQuery(pSql);
-
- taosReleaseRef(tscObjRef, pSql->self);
-
- return;
-
_error:
pRes->code = code;
tscAsyncResultOnError(pSql);
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
new file mode 100644
index 0000000000000000000000000000000000000000..130abdf4a73ade3951070a100555d15270c265f2
--- /dev/null
+++ b/src/client/src/tscGlobalmerge.c
@@ -0,0 +1,1145 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "os.h"
+#include "texpr.h"
+#include "tlosertree.h"
+
+#include "tscGlobalmerge.h"
+#include "tscSubquery.h"
+#include "tscLog.h"
+#include "qUtil.h"
+
+#define COLMODEL_GET_VAL(data, schema, rowId, colId) \
+ (data + (schema)->pFields[colId].offset * ((schema)->capacity) + (rowId) * (schema)->pFields[colId].field.bytes)
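+// The intermediate result pages are column-major: each column starts at
+// offset * capacity bytes into the page, and rows within a column are strided
+// by the column's byte width, which is what COLMODEL_GET_VAL encodes.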
+
+
+typedef struct SCompareParam {
+ SLocalDataSource **pLocalData;
+ tOrderDescriptor * pDesc;
+ int32_t num;
+ int32_t groupOrderType;
+} SCompareParam;
+
+static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
+ int32_t ret = 0;
+
+ size_t size = taosArrayGetSize(columnIndexList);
+ if (size > 0) {
+ ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
+ }
+
+  // ret == 0 means the row belongs to the same group
+ return (ret == 0);
+}
+
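+/*
+ * Comparator for the loser tree that merges the per-vnode sorted runs: an
+ * exhausted source (rowIdx == -1) always compares as the loser, so it sinks
+ * in the tree while the remaining sources keep being merged in order.
+ */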
+static int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
+ int32_t pLeftIdx = *(int32_t *)pLeft;
+ int32_t pRightIdx = *(int32_t *)pRight;
+
+ SCompareParam * pParam = (SCompareParam *)param;
+ tOrderDescriptor * pDesc = pParam->pDesc;
+ SLocalDataSource **pLocalData = pParam->pLocalData;
+
+ /* this input is exhausted, set the special value to denote this */
+ if (pLocalData[pLeftIdx]->rowIdx == -1) {
+ return 1;
+ }
+
+ if (pLocalData[pRightIdx]->rowIdx == -1) {
+ return -1;
+ }
+
+ if (pParam->groupOrderType == TSDB_ORDER_DESC) { // desc
+ return compare_d(pDesc, pParam->num, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data,
+ pParam->num, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data);
+ } else {
+ return compare_a(pDesc, pParam->num, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data,
+ pParam->num, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data);
+ }
+}
+
+int32_t tscCreateGlobalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
+ SQueryInfo* pQueryInfo, SGlobalMerger **pMerger, int64_t id) {
+ if (pMemBuffer == NULL) {
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ tscError("0x%"PRIx64" %p pMemBuffer is NULL", id, pMemBuffer);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ if (pDesc->pColumnModel == NULL) {
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ tscError("0x%"PRIx64" no local buffer or intermediate result format model", id);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ int32_t numOfFlush = 0;
+ for (int32_t i = 0; i < numOfBuffer; ++i) {
+ int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength;
+ if (len == 0) {
+ tscDebug("0x%"PRIx64" no data retrieved from orderOfVnode:%d", id, i + 1);
+ continue;
+ }
+
+ numOfFlush += len;
+ }
+
+ if (numOfFlush == 0 || numOfBuffer == 0) {
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ tscDebug("0x%"PRIx64" no data to retrieve", id);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
+ tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", id, pDesc->pColumnModel->capacity,
+ pMemBuffer[0]->pageSize);
+
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ *pMerger = (SGlobalMerger *) calloc(1, sizeof(SGlobalMerger));
+ if ((*pMerger) == NULL) {
+ tscError("0x%"PRIx64" failed to create local merge structure, out of memory", id);
+
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ (*pMerger)->pExtMemBuffer = pMemBuffer;
+ (*pMerger)->pLocalDataSrc = calloc(numOfFlush, POINTER_BYTES);
+ assert((*pMerger)->pLocalDataSrc != NULL);
+
+ (*pMerger)->numOfBuffer = numOfFlush;
+ (*pMerger)->numOfVnode = numOfBuffer;
+
+ (*pMerger)->pDesc = pDesc;
+ tscDebug("0x%"PRIx64" the number of merged leaves is: %d", id, (*pMerger)->numOfBuffer);
+
+ int32_t idx = 0;
+ for (int32_t i = 0; i < numOfBuffer; ++i) {
+ int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength;
+
+ for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
+ SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
+ if (ds == NULL) {
+ tscError("0x%"PRIx64" failed to create merge structure", id);
+ tfree(*pMerger);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ (*pMerger)->pLocalDataSrc[idx] = ds;
+
+ ds->pMemBuffer = pMemBuffer[i];
+ ds->flushoutIdx = j;
+ ds->filePage.num = 0;
+ ds->pageId = 0;
+ ds->rowIdx = 0;
+
+ tscDebug("0x%"PRIx64" load data from disk into memory, orderOfVnode:%d, total:%d", id, i + 1, idx + 1);
+ tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0);
+#ifdef _DEBUG_VIEW
+ printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.num);
+ SSrcColumnInfo colInfo[256] = {0};
+ SQueryInfo * pQueryInfo = tscGetQueryInfo(pCmd);
+
+ tscGetSrcColumnInfo(colInfo, pQueryInfo);
+
+ tColModelDisplayEx(pDesc->pColumnModel, ds->filePage.data, ds->filePage.num,
+ pMemBuffer[0]->numOfElemsPerPage, colInfo);
+#endif
+
+ if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
+ tscDebug("0x%"PRIx64" flush data is empty, ignore %d flush record", id, idx);
+ tfree(ds);
+ continue;
+ }
+
+ idx += 1;
+ }
+ }
+
+  // no data at all, so there is no need to merge the results.
+ if (idx == 0) {
+ tscDebug("0x%"PRIx64" retrieved no data", id);
+ tscDestroyGlobalMergerEnv(pMemBuffer, pDesc, numOfBuffer);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ (*pMerger)->numOfBuffer = idx;
+
+ SCompareParam *param = malloc(sizeof(SCompareParam));
+ if (param == NULL) {
+ tfree((*pMerger));
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ param->pLocalData = (*pMerger)->pLocalDataSrc;
+ param->pDesc = (*pMerger)->pDesc;
+ param->num = (*pMerger)->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
+
+ param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
+
+ int32_t code = tLoserTreeCreate(&(*pMerger)->pLoserTree, (*pMerger)->numOfBuffer, param, treeComparator);
+  if ((*pMerger)->pLoserTree == NULL || code != TSDB_CODE_SUCCESS) {
+    tfree(param);
+    tfree((*pMerger));
+    // make sure a failure is reported even if tLoserTreeCreate did not set an error code
+    return (code != TSDB_CODE_SUCCESS) ? code : TSDB_CODE_TSC_OUT_OF_MEMORY;
+  }
+
+ (*pMerger)->rowSize = pMemBuffer[0]->nElemSize;
+
+  // todo: handle the case where the fixed row size is larger than the minimum page size
+ assert((*pMerger)->rowSize <= pMemBuffer[0]->pageSize);
+
+ // restore the limitation value at the last stage
+ if (pQueryInfo->orderProjectQuery) {
+ pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
+ pQueryInfo->limit.offset = pQueryInfo->prjOffset;
+ }
+
+ // we change the capacity of schema to denote that there is only one row in temp buffer
+ (*pMerger)->pDesc->pColumnModel->capacity = 1;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage,
+ int32_t orderType) {
+ if (pPage->num == 0) {
+ return 0;
+ }
+
+ assert(pPage->num <= pDesc->pColumnModel->capacity);
+
+  // sort before flushing to disk; the data must be laid out contiguously in the tFilePage.
+ if (pDesc->orderInfo.numOfCols > 0) {
+ tColDataQSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType);
+ }
+
+#ifdef _DEBUG_VIEW
+ printf("%" PRIu64 " rows data flushed to disk after been sorted:\n", pPage->num);
+ tColModelDisplay(pDesc->pColumnModel, pPage->data, pPage->num, pPage->num);
+#endif
+
+ // write to cache after being sorted
+ if (tExtMemBufferPut(pMemoryBuf, pPage->data, (int32_t)pPage->num) < 0) {
+ tscError("failed to save data in temporary buffer");
+ return -1;
+ }
+
+ pPage->num = 0;
+ return 0;
+}
+
+int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType) {
+ int32_t ret = 0;
+ if ((ret = tscFlushTmpBufferImpl(pMemoryBuf, pDesc, pPage, orderType)) != 0) {
+ return ret;
+ }
+
+ if ((ret = tExtMemBufferFlush(pMemoryBuf)) != 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
+ int32_t numOfRows, int32_t orderType) {
+ SColumnModel *pModel = pDesc->pColumnModel;
+
+ if (pPage->num + numOfRows <= pModel->capacity) {
+ tColModelAppend(pModel, pPage, data, 0, numOfRows, numOfRows);
+ return 0;
+ }
+
+  // the current page cannot hold all rows; fill it up and flush it to the external buffer
+ int32_t numOfRemainEntries = pModel->capacity - (int32_t)pPage->num;
+ tColModelAppend(pModel, pPage, data, 0, numOfRemainEntries, numOfRows);
+
+  // the current page is full and needs to be flushed to disk
+ assert(pPage->num == pModel->capacity);
+ int32_t code = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType);
+ if (code != 0) {
+ return code;
+ }
+
+ int32_t remain = numOfRows - numOfRemainEntries;
+
+ while (remain > 0) {
+ int32_t numOfWriteElems = 0;
+ if (remain > pModel->capacity) {
+ numOfWriteElems = pModel->capacity;
+ } else {
+ numOfWriteElems = remain;
+ }
+
+ tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows);
+
+ if (pPage->num == pModel->capacity) {
+ if ((code = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ } else {
+ pPage->num = numOfWriteElems;
+ }
+
+ remain -= numOfWriteElems;
+ numOfRemainEntries += numOfWriteElems;
+ }
+
+ return 0;
+}
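+
+/*
+ * Overall lifecycle of the global merge pipeline in this file (a sketch
+ * inferred from the functions defined here):
+ *
+ *   tscCreateGlobalMergerEnv()            // per-subquery ext-mem buffers + order descriptor
+ *   saveToBuffer() / tscFlushTmpBuffer()  // spill sorted sub-results into those buffers
+ *   tscCreateGlobalMerger()               // build the loser tree over all flushed runs
+ *   doMultiwayMergeSort()                 // operator that emits globally ordered rows
+ *   tscDestroyGlobalMerger()              // release the tree, sources and buffers
+ */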
+
+void tscDestroyGlobalMerger(SGlobalMerger* pMerger) {
+ if (pMerger == NULL) {
+ return;
+ }
+
+ for (int32_t i = 0; i < pMerger->numOfBuffer; ++i) {
+ tfree(pMerger->pLocalDataSrc[i]);
+ }
+
+ pMerger->numOfBuffer = 0;
+ tscDestroyGlobalMergerEnv(pMerger->pExtMemBuffer, pMerger->pDesc, pMerger->numOfVnode);
+
+ pMerger->numOfCompleted = 0;
+
+ if (pMerger->pLoserTree) {
+ tfree(pMerger->pLoserTree->param);
+ tfree(pMerger->pLoserTree);
+ }
+
+ tfree(pMerger->buf);
+ tfree(pMerger->pLocalDataSrc);
+ free(pMerger);
+}
+
+static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo* pQueryInfo, SColumnModel *pModel) {
+ int32_t numOfGroupByCols = 0;
+
+ if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+ numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols;
+ }
+
+ // primary timestamp column is involved in final result
+ if (pQueryInfo->interval.interval != 0 || pQueryInfo->orderProjectQuery) {
+ numOfGroupByCols++;
+ }
+
+ int32_t *orderColIndexList = (int32_t *)calloc(numOfGroupByCols, sizeof(int32_t));
+ if (orderColIndexList == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ if (numOfGroupByCols > 0) {
+ if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+ int32_t numOfInternalOutput = (int32_t) tscNumOfExprs(pQueryInfo);
+
+ // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
+ for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i);
+ for(int32_t j = 0; j < numOfInternalOutput; ++j) {
+ SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j);
+
+ int32_t functionId = pExprInfo->base.functionId;
+ if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG)) {
+ orderColIndexList[i] = j;
+ break;
+ }
+ }
+ }
+
+ if (pQueryInfo->interval.interval != 0) {
+ // the first column is the timestamp, handles queries like "interval(10m) group by tags"
+ orderColIndexList[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX; //TODO ???
+ }
+ } else {
+ /*
+ * 1. the orderby ts asc/desc projection query for the super table
+ * 2. interval query without groupby clause
+ */
+ if (pQueryInfo->interval.interval != 0) {
+ orderColIndexList[0] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
+ } else {
+ size_t size = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < size; ++i) {
+ SExprInfo *pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ orderColIndexList[0] = i;
+ }
+ }
+ }
+
+ assert(pQueryInfo->order.orderColId == PRIMARYKEY_TIMESTAMP_COL_INDEX);
+ }
+ }
+
+ *pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
+ tfree(orderColIndexList);
+
+ if (*pOrderDesc == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+}
+
+int32_t tscCreateGlobalMergerEnv(SQueryInfo *pQueryInfo, tExtMemBuffer ***pMemBuffer, int32_t numOfSub,
+ tOrderDescriptor **pOrderDesc, uint32_t nBufferSizes, int64_t id) {
+ SSchema *pSchema = NULL;
+ SColumnModel *pModel = NULL;
+
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
+ (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * numOfSub);
+ if (*pMemBuffer == NULL) {
+ tscError("0x%"PRIx64" failed to allocate memory", id);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ size_t size = tscNumOfExprs(pQueryInfo);
+
+ pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
+ if (pSchema == NULL) {
+ tscError("0x%"PRIx64" failed to allocate memory", id);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ int32_t rlen = 0;
+ for (int32_t i = 0; i < size; ++i) {
+ SExprInfo *pExpr = tscExprGet(pQueryInfo, i);
+
+ pSchema[i].bytes = pExpr->base.resBytes;
+ pSchema[i].type = (int8_t)pExpr->base.resType;
+ tstrncpy(pSchema[i].name, pExpr->base.aliasName, tListLen(pSchema[i].name));
+
+ rlen += pExpr->base.resBytes;
+ }
+
+ int32_t capacity = 0;
+ if (rlen != 0) {
+ capacity = nBufferSizes / rlen;
+ }
+
+ pModel = createColumnModel(pSchema, (int32_t)size, capacity);
+ tfree(pSchema);
+ if (pModel == NULL){
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ int32_t pg = DEFAULT_PAGE_SIZE;
+ int32_t overhead = sizeof(tFilePage);
+ while((pg - overhead) < pModel->rowSize * 2) {
+ pg *= 2;
+ }
+
+ assert(numOfSub <= pTableMetaInfo->vgroupList->numOfVgroups);
+ for (int32_t i = 0; i < numOfSub; ++i) {
+ (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel);
+ (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL;
+ }
+
+ if (createOrderDescriptor(pOrderDesc, pQueryInfo, pModel) != TSDB_CODE_SUCCESS) {
+ tfree(pModel);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @param pMemBuffer
+ * @param pDesc
+ * @param numOfVnodes
+ */
+void tscDestroyGlobalMergerEnv(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, int32_t numOfVnodes) {
+ tOrderDescDestroy(pDesc);
+ for (int32_t i = 0; i < numOfVnodes; ++i) {
+ pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
+ }
+
+ tfree(pMemBuffer);
+}
+
+/**
+ *
+ * @param pMerger
+ * @param pOneInterDataSrc
+ * @param needAdjustLoserTree
+ * @return the number of remaining input sources; if 0, all data has been handled
+ */
+int32_t loadNewDataFromDiskFor(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
+ bool *needAdjustLoserTree) {
+ pOneInterDataSrc->rowIdx = 0;
+ pOneInterDataSrc->pageId += 1;
+
+ if ((uint32_t)pOneInterDataSrc->pageId <
+ pOneInterDataSrc->pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[pOneInterDataSrc->flushoutIdx].numOfPages) {
+ tExtMemBufferLoadData(pOneInterDataSrc->pMemBuffer, &(pOneInterDataSrc->filePage), pOneInterDataSrc->flushoutIdx,
+ pOneInterDataSrc->pageId);
+
+#if defined(_DEBUG_VIEW)
+ printf("new page load to buffer\n");
+ tColModelDisplay(pOneInterDataSrc->pMemBuffer->pColumnModel, pOneInterDataSrc->filePage.data,
+ pOneInterDataSrc->filePage.num, pOneInterDataSrc->pMemBuffer->pColumnModel->capacity);
+#endif
+ *needAdjustLoserTree = true;
+ } else {
+ pMerger->numOfCompleted += 1;
+
+ pOneInterDataSrc->rowIdx = -1;
+ pOneInterDataSrc->pageId = -1;
+ *needAdjustLoserTree = true;
+ }
+
+ return pMerger->numOfBuffer;
+}
+
+void adjustLoserTreeFromNewData(SGlobalMerger *pMerger, SLocalDataSource *pOneInterDataSrc,
+ SLoserTreeInfo *pTree) {
+ /*
+   * load a new data page into memory for this intermediate data source,
+   * since its last buffered record has been chosen as the loser-tree winner and consumed
+ */
+ bool needToAdjust = true;
+ if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
+ loadNewDataFromDiskFor(pMerger, pOneInterDataSrc, &needToAdjust);
+ }
+
+ /*
+   * otherwise, adjust the loser tree according to the new candidate data;
+   * if the loser tree has just been rebuilt completely, no adjustment is needed
+ */
+ if (needToAdjust) {
+ int32_t leafNodeIdx = pTree->pNode[0].index + pMerger->numOfBuffer;
+
+#ifdef _DEBUG_VIEW
+ printf("before adjust:\t");
+ tLoserTreeDisplay(pTree);
+#endif
+
+ tLoserTreeAdjust(pTree, leafNodeIdx);
+
+#ifdef _DEBUG_VIEW
+ printf("\nafter adjust:\t");
+ tLoserTreeDisplay(pTree);
+ printf("\n");
+#endif
+ }
+}
+
+//TODO it is not ordered, fix it
+static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex, bool* hasPrev) {
+ int32_t size = (int32_t) taosArrayGetSize(pColumnList);
+
+ for(int32_t i = 0; i < size; ++i) {
+ SColIndex* index = taosArrayGet(pColumnList, i);
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index->colIndex);
+ assert(index->colId == pColInfo->info.colId);
+
+ memcpy(prevRow[i], pColInfo->pData + pColInfo->info.bytes * rowIndex, pColInfo->info.bytes);
+ }
+
+ (*hasPrev) = true;
+}
+
+// the TSDB_FUNC_TAG function produces only one row of output, so the value
+// has to be copied to every row of the result
+static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t numOfRows) {
+ if (numOfRows <= 1) {
+ return;
+ }
+
+ for (int32_t k = 0; k < numOfOutput; ++k) {
+ if (pCtx[k].functionId != TSDB_FUNC_TAG) {
+ continue;
+ }
+
+ char* src = pCtx[k].pOutput;
+ char* dst = pCtx[k].pOutput + pCtx[k].outputBytes;
+
+    // start from the second row, as the first row already holds the result value.
+ for (int32_t i = 1; i < numOfRows; ++i) {
+ memcpy(dst, src, (size_t)pCtx[k].outputBytes);
+ dst += pCtx[k].outputBytes;
+ }
+ }
+}
+
+static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) {
+ for (int32_t j = 0; j < numOfExpr; ++j) {
+ pCtx[j].pInput = pDataPtr[j] + pCtx[j].inputBytes * rowIndex;
+ }
+
+ for (int32_t j = 0; j < numOfExpr; ++j) {
+ int32_t functionId = pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
+ doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
+ } else {
+ aAggs[functionId].mergeFunc(&pCtx[j]);
+ }
+ }
+}
+
+static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
+ for(int32_t j = 0; j < numOfExpr; ++j) {
+ int32_t functionId = pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
+ doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
+ } else {
+ aAggs[functionId].xFinalize(&pCtx[j]);
+ }
+ }
+}
+
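+/*
+ * doExecuteFinalMerge scans the block row by row: rows that are equal on the
+ * order columns are merged into the running aggregates; when a group boundary
+ * is crossed, the current results are finalized, the function contexts are
+ * re-initialized, and merging restarts with the boundary row.
+ */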
+static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
+ SMultiwayMergeInfo* pInfo = pOperator->info;
+ SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
+
+ char** addrPtr = calloc(pBlock->info.numOfCols, POINTER_BYTES);
+ for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ addrPtr[i] = pCtx[i].pInput;
+ pCtx[i].size = 1;
+ }
+
+ for(int32_t i = 0; i < pBlock->info.rows; ++i) {
+ if (pInfo->hasPrev) {
+ if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
+ } else {
+ doFinalizeResultImpl(pInfo, pCtx, numOfExpr);
+
+ int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput);
+ setTagValueForMultipleRows(pCtx, pOperator->numOfOutput, numOfRows);
+
+ pInfo->binfo.pRes->info.rows += numOfRows;
+
+ for(int32_t j = 0; j < numOfExpr; ++j) {
+ pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows);
+ if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) {
+ if(j>0) pCtx[j].ptsOutputBuf = pCtx[j-1].pOutput;
+ }
+ }
+
+ for(int32_t j = 0; j < numOfExpr; ++j) {
+ if (pCtx[j].functionId < 0) {
+ continue;
+ }
+
+ aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
+ }
+
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
+ }
+ } else {
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
+ }
+
+ savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev);
+ }
+
+  { // restore the original input pointers of the function contexts
+ for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ pCtx[i].pInput = addrPtr[i];
+ }
+ }
+
+ tfree(addrPtr);
+}
+
+static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
+ return (pMerger->numOfBuffer == pMerger->numOfCompleted);
+}
+
+SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint64_t id) {
+ SGlobalMerger *pMerger = calloc(1, sizeof(SGlobalMerger));
+ if (pMerger == NULL) {
+ tscDebug("0x%"PRIx64" free local reducer finished", id);
+ return NULL;
+ }
+
+ /*
+   * One extra byte is required, since sprintf needs additional space for the
+   * terminating '\0' at the end of the string
+ */
+ size_t size = numOfRes * rowLen + 1;
+ pMerger->buf = calloc(1, size);
+ return pMerger;
+}
+
+// todo remove it
+int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
+ int32_t maxRowSize = MAX(rowSize, finalRowSize);
+ char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
+
+ size_t size = tscNumOfFields(pQueryInfo);
+ SArithmeticSupport arithSup = {0};
+
+ // todo refactor
+ arithSup.offset = 0;
+ arithSup.numOfCols = (int32_t) tscNumOfExprs(pQueryInfo);
+ arithSup.exprList = pQueryInfo->exprList;
+ arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
+
+ for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, k);
+ arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->base.offset);
+ }
+
+ int32_t offset = 0;
+
+ for (int i = 0; i < size; ++i) {
+ SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
+
+ // calculate the result from several other columns
+ if (pSup->pExpr->pExpr != NULL) {
+ arithSup.pExprInfo = pSup->pExpr;
+ arithmeticTreeTraverse(arithSup.pExprInfo->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
+ } else {
+ SExprInfo* pExpr = pSup->pExpr;
+ memcpy(pbuf + pOutput->num * offset, pExpr->base.offset * pOutput->num + pOutput->data, (size_t)(pExpr->base.resBytes * pOutput->num));
+ }
+
+ offset += pSup->field.bytes;
+ }
+
+ memcpy(pOutput->data, pbuf, (size_t)(pOutput->num * offset));
+
+ tfree(pbuf);
+ tfree(arithSup.data);
+
+ return offset;
+}
+
+static void appendOneRowToDataBlock(SSDataBlock *pBlock, char *buf, SColumnModel *pModel, int32_t rowIndex,
+ int32_t maxRows) {
+ for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i);
+ char* p = pColInfo->pData + pBlock->info.rows * pColInfo->info.bytes;
+
+ char *src = COLMODEL_GET_VAL(buf, pModel, rowIndex, i);
+ memmove(p, src, pColInfo->info.bytes);
+ }
+
+ pBlock->info.rows += 1;
+}
+
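+/*
+ * doMultiwayMergeSort repeatedly takes the winner of the loser tree, appends
+ * it to the output block, advances the winning source (reloading a page from
+ * disk when it runs dry) and re-adjusts the tree. It returns early whenever a
+ * group boundary is crossed or the output block reaches its capacity.
+ */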
+SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SMultiwayMergeInfo *pInfo = pOperator->info;
+
+ SGlobalMerger *pMerger = pInfo->pMerge;
+ SLoserTreeInfo *pTree = pMerger->pLoserTree;
+
+ pInfo->binfo.pRes->info.rows = 0;
+
+ while(1) {
+ if (isAllSourcesCompleted(pMerger)) {
+ break;
+ }
+
+#ifdef _DEBUG_VIEW
+ printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
+#endif
+
+ assert((pTree->pNode[0].index < pMerger->numOfBuffer) && (pTree->pNode[0].index >= 0));
+
+ // chosen from loser tree
+ SLocalDataSource *pOneDataSrc = pMerger->pLocalDataSrc[pTree->pNode[0].index];
+ bool sameGroup = true;
+ if (pInfo->hasPrev) {
+
+ // todo refactor extract method
+ int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
+
+ // if this row belongs to current result set group
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColIndex * pIndex = taosArrayGet(pInfo->orderColumnList, i);
+ SColumnInfoData *pColInfo = taosArrayGet(pInfo->binfo.pRes->pDataBlock, pIndex->colIndex);
+
+ char *newRow = COLMODEL_GET_VAL(pOneDataSrc->filePage.data, pOneDataSrc->pMemBuffer->pColumnModel,
+ pOneDataSrc->rowIdx, pIndex->colIndex);
+
+ char *data = pInfo->prevRow[i];
+ int32_t ret = columnValueAscendingComparator(data, newRow, pColInfo->info.type, pColInfo->info.bytes);
+ if (ret == 0) {
+ continue;
+ } else {
+ sameGroup = false;
+ *newgroup = true;
+ break;
+ }
+ }
+ }
+
+ if (!sameGroup || !pInfo->hasPrev) { //save the data
+ int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColIndex * pIndex = taosArrayGet(pInfo->orderColumnList, i);
+ SColumnInfoData *pColInfo = taosArrayGet(pInfo->binfo.pRes->pDataBlock, pIndex->colIndex);
+
+ char *curCol = COLMODEL_GET_VAL(pOneDataSrc->filePage.data, pOneDataSrc->pMemBuffer->pColumnModel,
+ pOneDataSrc->rowIdx, pIndex->colIndex);
+ memcpy(pInfo->prevRow[i], curCol, pColInfo->info.bytes);
+ }
+
+ pInfo->hasPrev = true;
+ }
+
+ if (!sameGroup && pInfo->binfo.pRes->info.rows > 0) {
+ return pInfo->binfo.pRes;
+ }
+
+ appendOneRowToDataBlock(pInfo->binfo.pRes, pOneDataSrc->filePage.data, pOneDataSrc->pMemBuffer->pColumnModel,
+ pOneDataSrc->rowIdx, pOneDataSrc->pMemBuffer->pColumnModel->capacity);
+
+#if defined(_DEBUG_VIEW)
+ printf("chosen row:\t");
+ SSrcColumnInfo colInfo[256] = {0};
+ tscGetSrcColumnInfo(colInfo, pQueryInfo);
+
+ tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
+#endif
+
+ pOneDataSrc->rowIdx += 1;
+ adjustLoserTreeFromNewData(pMerger, pOneDataSrc, pTree);
+
+ if (pInfo->binfo.pRes->info.rows >= pInfo->bufCapacity) {
+ return pInfo->binfo.pRes;
+ }
+ }
+
+ pOperator->status = OP_EXEC_DONE;
+ return (pInfo->binfo.pRes->info.rows > 0)? pInfo->binfo.pRes:NULL;
+}
+
+static bool isSameGroup(SArray* orderColumnList, SSDataBlock* pBlock, char** dataCols) {
+ int32_t numOfCols = (int32_t) taosArrayGetSize(orderColumnList);
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColIndex *pIndex = taosArrayGet(orderColumnList, i);
+
+ SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex);
+ assert(pIndex->colId == pColInfo->info.colId);
+
+ char *data = dataCols[i];
+ int32_t ret = columnValueAscendingComparator(data, pColInfo->pData, pColInfo->info.type, pColInfo->info.bytes);
+ if (ret == 0) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+
+ return true;
+}
+
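+/*
+ * doGlobalAggregate pulls sorted blocks from upstream and merges the partial
+ * aggregates produced by different vnodes. A block that opens a new group is
+ * parked in pExistBlock and handled on the next invocation, so each call
+ * normally returns the completed result of one group (unless
+ * multiGroupResults is set).
+ */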
+SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SMultiwayMergeInfo *pAggInfo = pOperator->info;
+ SOperatorInfo *upstream = pOperator->upstream[0];
+
+ *newgroup = false;
+ bool handleData = false;
+ pAggInfo->binfo.pRes->info.rows = 0;
+
+  { // first, drain the block parked for a new group by the previous invocation, if any
+ if (pAggInfo->hasDataBlockForNewGroup) {
+ pAggInfo->hasPrev = false; // now we start from a new group data set.
+
+      // the pending block opened a new group; start aggregating it now
+ setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pAggInfo->pExistBlock, TSDB_ORDER_ASC);
+ updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pAggInfo->pExistBlock->info.rows);
+
+ { // reset output buffer
+ for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
+ SQLFunctionCtx* pCtx = &pAggInfo->binfo.pCtx[j];
+ if (pCtx->functionId < 0) {
+ clearOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity);
+ continue;
+ }
+
+ aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo);
+ }
+ }
+
+ doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pAggInfo->pExistBlock);
+
+ savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pAggInfo->pExistBlock, 0,
+ &pAggInfo->hasGroupColData);
+ pAggInfo->pExistBlock = NULL;
+ pAggInfo->hasDataBlockForNewGroup = false;
+ handleData = true;
+ *newgroup = true;
+ }
+ }
+
+ SSDataBlock* pBlock = NULL;
+ while(1) {
+ bool prev = *newgroup;
+ publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ pBlock = upstream->exec(upstream, newgroup);
+ publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+ if (pBlock == NULL) {
+ *newgroup = prev;
+ break;
+ }
+
+ bool sameGroup = true;
+ if (pAggInfo->hasGroupColData) {
+ sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
+ if (!sameGroup && !pAggInfo->multiGroupResults) {
+ *newgroup = true;
+ pAggInfo->hasDataBlockForNewGroup = true;
+ pAggInfo->pExistBlock = pBlock;
+ savePrevOrderColumns(pAggInfo->prevRow, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasPrev);
+ break;
+ }
+ }
+
+    // still within the current group (or multi-group output is allowed); merge this block
+ setInputDataBlock(pOperator, pAggInfo->binfo.pCtx, pBlock, TSDB_ORDER_ASC);
+ updateOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor);
+
+ doExecuteFinalMerge(pOperator, pOperator->numOfOutput, pBlock);
+ savePrevOrderColumns(pAggInfo->currentGroupColData, pAggInfo->groupColumnList, pBlock, 0, &pAggInfo->hasGroupColData);
+ handleData = true;
+ }
+
+ if (handleData) { // data in current group is all handled
+ doFinalizeResultImpl(pAggInfo, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
+
+ int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
+
+ pAggInfo->binfo.pRes->info.rows += numOfRows;
+ setTagValueForMultipleRows(pAggInfo->binfo.pCtx, pOperator->numOfOutput, numOfRows);
+ }
+
+ SSDataBlock* pRes = pAggInfo->binfo.pRes;
+  { // set the time window of the result block from its timestamp column
+ SColumnInfoData* pInfoData = taosArrayGet(pRes->pDataBlock, 0);
+
+ if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pRes->info.rows > 0) {
+ STimeWindow* w = &pRes->info.window;
+
+ w->skey = *(int64_t*)pInfoData->pData;
+ w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pRes->info.rows - 1));
+
+ if (pOperator->pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_DESC) {
+ SWAP(w->skey, w->ekey, TSKEY);
+ assert(w->skey <= w->ekey);
+ }
+ }
+ }
+
+ return (pRes->info.rows != 0)? pRes:NULL;
+}
+
+static void doHandleDataInCurrentGroup(SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t rowIndex) {
+ if (pInfo->currentOffset > 0) {
+ pInfo->currentOffset -= 1;
+ } else {
+    // the offset is exhausted; keep this row unless the row limit of the group has been reached
+ if (pInfo->limit.limit < 0 || (pInfo->limit.limit >= 0 && pInfo->rowsTotal < pInfo->limit.limit)) {
+ size_t num1 = taosArrayGetSize(pInfo->pRes->pDataBlock);
+ for (int32_t i = 0; i < num1; ++i) {
+ SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+ SColumnInfoData *pDstInfoData = taosArrayGet(pInfo->pRes->pDataBlock, i);
+
+ SColumnInfo *pColInfo = &pColInfoData->info;
+
+ char *pSrc = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
+ char *pDst = (char *)pDstInfoData->pData + (pInfo->pRes->info.rows * pColInfo->bytes);
+
+ memcpy(pDst, pSrc, pColInfo->bytes);
+ }
+
+ pInfo->rowsTotal += 1;
+ pInfo->pRes->info.rows += 1;
+ }
+ }
+}
+
+static void ensureOutputBuf(SSLimitOperatorInfo * pInfo, SSDataBlock *pResultBlock, int32_t numOfRows) {
+ if (pInfo->capacity < pResultBlock->info.rows + numOfRows) {
+ int32_t total = pResultBlock->info.rows + numOfRows;
+
+ size_t num = taosArrayGetSize(pResultBlock->pDataBlock);
+ for (int32_t i = 0; i < num; ++i) {
+ SColumnInfoData *pInfoData = taosArrayGet(pResultBlock->pDataBlock, i);
+
+ char *tmp = realloc(pInfoData->pData, total * pInfoData->info.bytes);
+ if (tmp != NULL) {
+ pInfoData->pData = tmp;
+ } else {
+ // todo handle the malloc failure
+ }
+
+ pInfo->capacity = total;
+ pInfo->threshold = (int64_t) (total * 0.8);
+ }
+ }
+}
+
+enum {
+ BLOCK_NEW_GROUP = 1,
+ BLOCK_NO_GROUP = 2,
+ BLOCK_SAME_GROUP = 3,
+};
+
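+/*
+ * doSlimitImpl consumes one upstream block and reports how the scan ended:
+ *   BLOCK_NEW_GROUP  - a new group started while rows of the previous group
+ *                      are still pending in pRes; the caller must emit them,
+ *   BLOCK_NO_GROUP   - the SLIMIT group quota is exhausted and the operator is done,
+ *   BLOCK_SAME_GROUP - the whole block was consumed normally.
+ */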
+static int32_t doSlimitImpl(SOperatorInfo* pOperator, SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock) {
+ int32_t rowIndex = 0;
+
+ while (rowIndex < pBlock->info.rows) {
+ int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
+
+ bool samegroup = true;
+ if (pInfo->hasPrev) {
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColIndex *pIndex = taosArrayGet(pInfo->orderColumnList, i);
+ SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex);
+
+ SColumnInfo *pColInfo = &pColInfoData->info;
+
+ char *d = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
+ int32_t ret = columnValueAscendingComparator(pInfo->prevRow[i], d, pColInfo->type, pColInfo->bytes);
+ if (ret != 0) { // it is a new group
+ samegroup = false;
+ break;
+ }
+ }
+ }
+
+ if (!samegroup || !pInfo->hasPrev) {
+ pInfo->ignoreCurrentGroup = false;
+ savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, rowIndex, &pInfo->hasPrev);
+
+ pInfo->currentOffset = pInfo->limit.offset; // reset the offset value for a new group
+ pInfo->rowsTotal = 0;
+
+ if (pInfo->currentGroupOffset > 0) {
+ pInfo->ignoreCurrentGroup = true;
+ pInfo->currentGroupOffset -= 1; // now we are in the next group data
+ rowIndex += 1;
+ continue;
+ }
+
+      // A new group has arrived and the group (slimit) quota has already been reached.
+      // Jump out of the current loop and return immediately.
+ if (pInfo->slimit.limit >= 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
+ setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+ pOperator->status = OP_EXEC_DONE;
+ return BLOCK_NO_GROUP;
+ }
+
+ pInfo->groupTotal += 1;
+
+      // multi-group output is not allowed: return as soon as a new group starts
+      // while rows of the previous group are still pending in the result block
+ if (!pInfo->multigroupResult && !samegroup && pInfo->pRes->info.rows > 0) {
+ return BLOCK_NEW_GROUP;
+ }
+
+ doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
+
+ } else { // handle the offset in the same group
+      // all rows of the current group are discarded to honor the slimit offset in the SQL statement
+ if (pInfo->ignoreCurrentGroup) {
+ rowIndex += 1;
+ continue;
+ }
+
+ doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
+ }
+
+ rowIndex += 1;
+ }
+
+ return BLOCK_SAME_GROUP;
+}
+
+SSDataBlock* doSLimit(void* param, bool* newgroup) {
+ SOperatorInfo *pOperator = (SOperatorInfo *)param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SSLimitOperatorInfo *pInfo = pOperator->info;
+ pInfo->pRes->info.rows = 0;
+
+ if (pInfo->pPrevBlock != NULL) {
+ ensureOutputBuf(pInfo, pInfo->pRes, pInfo->pPrevBlock->info.rows);
+ int32_t ret = doSlimitImpl(pOperator, pInfo, pInfo->pPrevBlock);
+ assert(ret != BLOCK_NEW_GROUP);
+
+ pInfo->pPrevBlock = NULL;
+ }
+
+ assert(pInfo->currentGroupOffset >= 0);
+
+ while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ if (pBlock == NULL) {
+ return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
+ }
+
+ ensureOutputBuf(pInfo, pInfo->pRes, pBlock->info.rows);
+ int32_t ret = doSlimitImpl(pOperator, pInfo, pBlock);
+ if (ret == BLOCK_NEW_GROUP) {
+ pInfo->pPrevBlock = pBlock;
+ return pInfo->pRes;
+ }
+
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
+ }
+
+    // enough rows have been accumulated for the current batch; return them to the caller
+ if (pInfo->pRes->info.rows > pInfo->threshold) {
+ return pInfo->pRes;
+ }
+ }
+}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 68ef8c25d652d327246047f112b42f789e93a5e8..07db18b498873f4a023d8ea76aadd7e76a4cd8d2 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -20,7 +20,7 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
#include "tsclient.h"
#include "taos.h"
#include "tscSubquery.h"
@@ -53,7 +53,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
// one column for each row
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
@@ -71,7 +71,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
numOfRows = numOfRows + tscGetNumOfTags(pMeta);
}
- tscInitResObjForLocalQuery(pSql, totalNumOfRows, rowLen);
+ pSql->res.pMerger = tscInitResObjForLocalQuery(totalNumOfRows, rowLen, pSql->self);
+ tscInitResForMerge(&pSql->res);
+
SSchema *pSchema = tscGetTableSchema(pMeta);
for (int32_t i = 0; i < numOfRows; ++i) {
@@ -154,14 +156,14 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pSql->cmd.numOfCols = numOfCols;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
pQueryInfo->order.order = TSDB_ORDER_ASC;
TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE};
tstrncpy(f.name, "Field", sizeof(f.name));
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
@@ -171,7 +173,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Type", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
-1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE;
@@ -181,7 +183,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Length", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
-1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t);
@@ -191,7 +193,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
tstrncpy(f.name, "Note", sizeof(f.name));
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
-1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE;
@@ -199,7 +201,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
}
static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
assert(tscGetMetaInfo(pQueryInfo, 0)->pTableMeta != NULL);
@@ -321,11 +323,12 @@ TAOS_ROW tscFetchRow(void *param) {
  // the current data set is exhausted, fetch more data from the node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
- pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
+ pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -389,7 +392,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
SColumnIndex index = {0};
pSql->cmd.numOfCols = 2;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
pQueryInfo->order.order = TSDB_ORDER_ASC;
TAOS_FIELD f;
@@ -404,7 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
}
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes;
@@ -417,7 +420,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
}
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE;
@@ -427,12 +430,13 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) {
SSqlRes *pRes = &pSql->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
int32_t numOfRows = 1;
if (strlen(ddl) == 0) {
}
- tscInitResObjForLocalQuery(pSql, numOfRows, rowLen);
+ pSql->res.pMerger = tscInitResObjForLocalQuery(numOfRows, rowLen, pSql->self);
+ tscInitResForMerge(&pSql->res);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows;
@@ -444,7 +448,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
return 0;
}
static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result);
tscFieldInfoUpdateOffset(pQueryInfo);
@@ -532,7 +536,7 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) {
}
buf[0] = 0;
- STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
+ STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
free(buf);
@@ -553,7 +557,7 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) {
return TSDB_CODE_SUCCESS;
}
static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
@@ -607,7 +611,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
}
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
@@ -634,7 +638,7 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
}
static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, char *ddl) {
char *result = ddl;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
@@ -675,11 +679,14 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
}
static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
+ if (pSql->cmd.command == TSDB_SQL_SHOW_CREATE_STABLE && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@@ -701,7 +708,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
}
static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -710,13 +717,12 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder));
+ SCreateBuilder *param = (SCreateBuilder *)calloc(1, sizeof(SCreateBuilder));
if (param == NULL) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
-
- strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
+ tNameGetDbName(&pTableMetaInfo->name, param->buf);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@@ -728,7 +734,7 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resBytes = TSDB_USER_LEN + TSDB_DATA_TYPE_BINARY;
@@ -755,7 +761,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
extractDBName(pSql->pTscObj->db, db);
pthread_mutex_unlock(&pSql->pTscObj->mutex);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
@@ -782,7 +788,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
static int32_t tscProcessServerVer(SSqlObj *pSql) {
const char* v = pSql->pTscObj->sversion;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
@@ -805,7 +811,7 @@ static int32_t tscProcessServerVer(SSqlObj *pSql) {
}
static int32_t tscProcessClientVer(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
@@ -845,19 +851,23 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) {
SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid);
if (pHb != NULL) {
pSql->res.code = pHb->res.code;
- taosReleaseRef(tscObjRef, pObj->hbrid);
}
if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ taosReleaseRef(tscObjRef, pObj->hbrid);
return pSql->res.code;
}
- pSql->res.code = checkForOnlineNode(pHb);
+ if (pHb != NULL) {
+ pSql->res.code = checkForOnlineNode(pHb);
+ taosReleaseRef(tscObjRef, pObj->hbrid);
+ }
+
if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
return pSql->res.code;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
int32_t val = 1;
@@ -871,7 +881,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
pCmd->numOfCols = 1;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
pQueryInfo->order.order = TSDB_ORDER_ASC;
tscFieldInfoClear(&pQueryInfo->fieldsInfo);
@@ -880,10 +890,11 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
+ pSql->res.pMerger = tscInitResObjForLocalQuery(1, (int32_t)valueLength, pSql->self);
+ tscInitResForMerge(&pSql->res);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
- pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0);
+ pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
memcpy(pRes->data, val, pInfo->field.bytes);
}
@@ -908,12 +919,13 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qId = 0x1;
pRes->numOfRows = 0;
- } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
+ } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE || pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaMap);
+ taosCacheEmpty(tscVgroupListBuf);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
@@ -926,7 +938,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
} else if (pCmd->command == TSDB_SQL_SERV_STATUS) {
pRes->code = tscProcessServStatus(pSql);
} else {
- pRes->code = TSDB_CODE_TSC_INVALID_SQL;
+ pRes->code = TSDB_CODE_TSC_INVALID_OPERATION;
tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
}
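
The tscProcessServStatus hunk above is essentially a reference-lifetime fix: the heartbeat object acquired through taosAcquireRef must not be touched after taosReleaseRef, and every early-return path has to drop the reference exactly once. A minimal sketch of that discipline, with hypothetical acquireRef/releaseRef stand-ins rather than the real taos* APIs:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the ref-counted objects behind taosAcquireRef/taosReleaseRef. */
typedef struct { int refs; int code; } Obj;

static Obj *acquireRef(Obj *o) { if (o != NULL) o->refs++; return o; }
static void releaseRef(Obj *o) {
  if (o != NULL && --o->refs == 0) {
    printf("object freed\n");
    free(o);
  }
}

/* The fixed pattern: every early-return path releases the reference exactly once,
 * and the object is never dereferenced after releaseRef(). */
static int checkStatus(Obj *registry) {
  Obj *hb = acquireRef(registry);
  int code = (hb != NULL) ? hb->code : 0;
  if (code != 0) {   // an early return still releases before leaving
    releaseRef(hb);
    return code;
  }
  if (hb != NULL) {  // the last use of hb happens before the release
    code = hb->code;
    releaseRef(hb);
  }
  return code;
}

int main() {
  Obj *o = calloc(1, sizeof(Obj));
  o->refs = 1;  // the owner's reference
  printf("status=%d\n", checkStatus(o));
  releaseRef(o);  // the owner drops its reference; the object is freed here
  return 0;
}
```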
diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c
deleted file mode 100644
index 5fe020bb33b963e02291060286c135e6db59756f..0000000000000000000000000000000000000000
--- a/src/client/src/tscLocalMerge.c
+++ /dev/null
@@ -1,1780 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "tscLocalMerge.h"
-#include "tscSubquery.h"
-#include "os.h"
-#include "texpr.h"
-#include "tlosertree.h"
-#include "tscLog.h"
-#include "tscUtil.h"
-#include "tschemautil.h"
-#include "tsclient.h"
-#include "qUtil.h"
-
-typedef struct SCompareParam {
- SLocalDataSource **pLocalData;
- tOrderDescriptor * pDesc;
- int32_t num;
- int32_t groupOrderType;
-} SCompareParam;
-
-int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
- int32_t pLeftIdx = *(int32_t *)pLeft;
- int32_t pRightIdx = *(int32_t *)pRight;
-
- SCompareParam * pParam = (SCompareParam *)param;
- tOrderDescriptor * pDesc = pParam->pDesc;
- SLocalDataSource **pLocalData = pParam->pLocalData;
-
- /* this input is exhausted, set the special value to denote this */
- if (pLocalData[pLeftIdx]->rowIdx == -1) {
- return 1;
- }
-
- if (pLocalData[pRightIdx]->rowIdx == -1) {
- return -1;
- }
-
- if (pParam->groupOrderType == TSDB_ORDER_DESC) { // desc
- return compare_d(pDesc, pParam->num, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data,
- pParam->num, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data);
- } else {
- return compare_a(pDesc, pParam->num, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data,
- pParam->num, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data);
- }
-}
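
The rowIdx == -1 convention in treeComparator makes an exhausted source always lose, so it sinks to the bottom of the loser tree and is never selected again. The sketch below applies the same convention to a k-way merge over plain integer streams; all names are hypothetical, and a linear scan stands in for the loser tree:

```c
#include <limits.h>
#include <stdio.h>

#define NUM_SRC 3

/* Hypothetical source: a sorted stream with a read position. */
typedef struct { const int *data; int len; int pos; } Src;

/* Same convention as treeComparator: an exhausted source compares as +infinity,
 * so it always loses and is never selected again. */
static int srcCompare(const Src *a, const Src *b) {
  int va = (a->pos < a->len) ? a->data[a->pos] : INT_MAX;
  int vb = (b->pos < b->len) ? b->data[b->pos] : INT_MAX;
  return (va > vb) - (va < vb);
}

int main() {
  const int s0[] = {1, 4, 9}, s1[] = {2, 3}, s2[] = {5};
  Src src[NUM_SRC] = {{s0, 3, 0}, {s1, 2, 0}, {s2, 1, 0}};

  // A linear scan stands in for the loser tree; the comparator is the point here.
  for (;;) {
    int best = 0;
    for (int i = 1; i < NUM_SRC; ++i) {
      if (srcCompare(&src[i], &src[best]) < 0) best = i;
    }
    if (src[best].pos >= src[best].len) break;  // every source is exhausted
    printf("%d ", src[best].data[src[best].pos++]);
  }
  printf("\n");  // prints: 1 2 3 4 5 9
  return 0;
}
```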
-
-static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) {
- /*
- * the fields and offset attributes in pCmd and pModel may be different due to
- * merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
- */
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t i = 0; i < size; ++i) {
- SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
- SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i);
-
- pCtx->pOutput = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
- pCtx->order = pQueryInfo->order.order;
- pCtx->functionId = pExpr->functionId;
-
-    // the input buffer holds only one data point
- int16_t offset = getColumnModelOffset(pDesc->pColumnModel, i);
- SSchema *pSchema = getColumnModelSchema(pDesc->pColumnModel, i);
-
- pCtx->pInput = pReducer->pTempBuffer->data + offset;
-
- // input data format comes from pModel
- pCtx->inputType = pSchema->type;
- pCtx->inputBytes = pSchema->bytes;
-
-    // the output data format, by contrast, comes from pCmd.
- pCtx->outputBytes = pExpr->resBytes;
- pCtx->outputType = pExpr->resType;
-
- pCtx->size = 1;
- pCtx->hasNull = true;
- pCtx->currentStage = MERGE_STAGE;
-
-    // for the top/bottom functions, the timestamp output is in the first column
- int32_t functionId = pExpr->functionId;
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
- pCtx->ptsOutputBuf = pReducer->pCtx[0].pOutput;
- pCtx->param[2].i64 = pQueryInfo->order.order;
- pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
- pCtx->param[1].i64 = pQueryInfo->order.orderColId;
- } else if (functionId == TSDB_FUNC_APERCT) {
- pCtx->param[0].i64 = pExpr->param[0].i64;
- pCtx->param[0].nType = pExpr->param[0].nType;
- } else if (functionId == TSDB_FUNC_BLKINFO) {
- pCtx->param[0].i64 = pExpr->param[0].i64;
- pCtx->param[0].nType = pExpr->param[0].nType;
- pCtx->numOfParams = 1;
- }
-
- pCtx->interBufBytes = pExpr->interBytes;
- pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo));
- pCtx->stableQuery = true;
- }
-
- int16_t n = 0;
- int16_t tagLen = 0;
- SQLFunctionCtx **pTagCtx = calloc(pQueryInfo->fieldsInfo.numOfOutput, POINTER_BYTES);
-
- SQLFunctionCtx *pCtx = NULL;
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) {
- tagLen += pExpr->resBytes;
- pTagCtx[n++] = &pReducer->pCtx[i];
- } else if ((aAggs[pExpr->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
- pCtx = &pReducer->pCtx[i];
- }
- }
-
- if (n == 0 || pCtx == NULL) {
- free(pTagCtx);
- } else {
- pCtx->tagInfo.pTagCtxList = pTagCtx;
- pCtx->tagInfo.numOfTagCols = n;
- pCtx->tagInfo.tagsLen = tagLen;
- }
-}
-
-static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
- int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
- int32_t offset = 0;
-
- SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo));
- for(int32_t i = 0; i < numOfCols; ++i) {
- SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
-
- if (pIField->pArithExprInfo == NULL) {
- SSqlExpr* pExpr = pIField->pSqlExpr;
-
- pFillCol[i].col.bytes = pExpr->resBytes;
- pFillCol[i].col.type = (int8_t)pExpr->resType;
- pFillCol[i].col.colId = pExpr->colInfo.colId;
- pFillCol[i].flag = pExpr->colInfo.flag;
- pFillCol[i].col.offset = offset;
- pFillCol[i].functionId = pExpr->functionId;
- pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
- } else {
- pFillCol[i].col.bytes = pIField->field.bytes;
- pFillCol[i].col.type = (int8_t)pIField->field.type;
- pFillCol[i].col.colId = -100;
- pFillCol[i].flag = TSDB_COL_NORMAL;
- pFillCol[i].col.offset = offset;
- pFillCol[i].functionId = -1;
- pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
- }
-
- offset += pFillCol[i].col.bytes;
- }
-
- return pFillCol;
-}
-
-void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
- SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
- SSqlCmd* pCmd = &pSql->cmd;
- SSqlRes* pRes = &pSql->res;
-
- if (pMemBuffer == NULL) {
- tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("pMemBuffer:%p is NULL", pMemBuffer);
- pRes->code = TSDB_CODE_TSC_APP_ERROR;
- return;
- }
-
- if (pDesc->pColumnModel == NULL) {
- tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
- pRes->code = TSDB_CODE_TSC_APP_ERROR;
- return;
- }
-
- int32_t numOfFlush = 0;
- for (int32_t i = 0; i < numOfBuffer; ++i) {
- int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength;
- if (len == 0) {
- tscDebug("0x%"PRIx64" no data retrieved from orderOfVnode:%d", pSql->self, i + 1);
- continue;
- }
-
- numOfFlush += len;
- }
-
- if (numOfFlush == 0 || numOfBuffer == 0) {
- tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty
- tscDebug("0x%"PRIx64" retrieved no data", pSql->self);
- return;
- }
-
- if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
- tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
- pMemBuffer[0]->pageSize);
-
- tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- pRes->code = TSDB_CODE_TSC_APP_ERROR;
- return;
- }
-
- size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
-
- SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
- if (pReducer == NULL) {
- tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
-
- tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return;
- }
-
- pReducer->pExtMemBuffer = pMemBuffer;
- pReducer->pLocalDataSrc = (SLocalDataSource **)&pReducer[1];
- assert(pReducer->pLocalDataSrc != NULL);
-
- pReducer->numOfBuffer = numOfFlush;
- pReducer->numOfVnode = numOfBuffer;
-
- pReducer->pDesc = pDesc;
- tscDebug("0x%"PRIx64" the number of merged leaves is: %d", pSql->self, pReducer->numOfBuffer);
-
- int32_t idx = 0;
- for (int32_t i = 0; i < numOfBuffer; ++i) {
- int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength;
-
- for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
- SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
- if (ds == NULL) {
- tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tfree(pReducer);
- return;
- }
-
- pReducer->pLocalDataSrc[idx] = ds;
-
- ds->pMemBuffer = pMemBuffer[i];
- ds->flushoutIdx = j;
- ds->filePage.num = 0;
- ds->pageId = 0;
- ds->rowIdx = 0;
-
- tscDebug("0x%"PRIx64" load data from disk into memory, orderOfVnode:%d, total:%d", pSql->self, i + 1, idx + 1);
- tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0);
-#ifdef _DEBUG_VIEW
- printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.num);
- SSrcColumnInfo colInfo[256] = {0};
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- tscGetSrcColumnInfo(colInfo, pQueryInfo);
-
- tColModelDisplayEx(pDesc->pColumnModel, ds->filePage.data, ds->filePage.num,
- pMemBuffer[0]->numOfElemsPerPage, colInfo);
-#endif
-
- if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
- tscDebug("0x%"PRIx64" flush data is empty, ignore %d flush record", pSql->self, idx);
- tfree(ds);
- continue;
- }
-
- idx += 1;
- }
- }
-
-  // no actual data, so there is no need to merge the results.
- if (idx == 0) {
- tfree(pReducer);
- return;
- }
-
- pReducer->numOfBuffer = idx;
-
- SCompareParam *param = malloc(sizeof(SCompareParam));
- if (param == NULL) {
- tfree(pReducer);
- return;
- }
-
- param->pLocalData = pReducer->pLocalDataSrc;
- param->pDesc = pReducer->pDesc;
- param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
- pReducer->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
-
- pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
- if (pReducer->pLoserTree == NULL || pRes->code != 0) {
- tfree(param);
- tfree(pReducer);
- return;
- }
-
-  // the input data follows the old format while the output is in the new format;
-  // therefore, all of the input must be parsed using the old format
- pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx));
- pReducer->rowSize = pMemBuffer[0]->nElemSize;
-
- tscRestoreFuncForSTableQuery(pQueryInfo);
- tscFieldInfoUpdateOffset(pQueryInfo);
-
- if (pReducer->rowSize > pMemBuffer[0]->pageSize) {
- assert(false); // todo fixed row size is larger than the minimum page size;
- }
-
- pReducer->hasPrevRow = false;
- pReducer->hasUnprocessedRow = false;
-
- pReducer->prevRowOfInput = (char *)calloc(1, pReducer->rowSize);
-
- // used to keep the latest input row
- pReducer->pTempBuffer = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage));
- pReducer->discardData = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage));
- pReducer->discard = false;
-
- pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16;
- pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
-
- pReducer->resColModel = finalmodel;
- pReducer->resColModel->capacity = pReducer->nResultBufSize;
- pReducer->finalModel = pFFModel;
-
- int32_t expandFactor = 1;
- if (finalmodel->rowSize > 0) {
- bool topBotQuery = tscIsTopbotQuery(pQueryInfo);
- if (topBotQuery) {
- expandFactor = tscGetTopbotQueryParam(pQueryInfo);
- pReducer->resColModel->capacity /= (finalmodel->rowSize * expandFactor);
- pReducer->resColModel->capacity *= expandFactor;
- } else {
- pReducer->resColModel->capacity /= finalmodel->rowSize;
- }
- }
-
- assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pReducer->rowSize);
-
- pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
-
- if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
- pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
- tfree(pReducer->pTempBuffer);
- tfree(pReducer->discardData);
- tfree(pReducer->pResultBuf);
- tfree(pReducer->pFinalRes);
- tfree(pReducer->prevRowOfInput);
- tfree(pReducer->pLoserTree);
- tfree(param);
- tfree(pReducer);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return;
- }
-
- pReducer->pTempBuffer->num = 0;
-
- tscCreateResPointerInfo(pRes, pQueryInfo);
- tscInitSqlContext(pCmd, pReducer, pDesc);
-
-  // we change the capacity of the schema to denote that there is only one row in the temp buffer
- pReducer->pDesc->pColumnModel->capacity = 1;
-
- // restore the limitation value at the last stage
- if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
- pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
- pQueryInfo->limit.offset = pQueryInfo->prjOffset;
- }
-
- pReducer->offset = (int32_t)pQueryInfo->limit.offset;
-
- pRes->pLocalMerger = pReducer;
- pRes->numOfGroups = 0;
-
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
- int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
-
- if (pQueryInfo->fillType != TSDB_FILL_NONE) {
- SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
-    // support SQL like: select selective_function, tag1... where ... group by tag3... fill(...) with any fill type other than none
- // the group by expr columns and select tags are different
- int32_t numOfCols = tscNumOfFields(pQueryInfo);
- int32_t numOfTags = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- if (TSDB_COL_IS_TAG(pFillCol[i].flag)) {
- numOfTags++;
- }
- }
- pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, numOfTags,
- 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
- tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
- }
-}
-
-static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage,
- int32_t orderType) {
- if (pPage->num == 0) {
- return 0;
- }
-
- assert(pPage->num <= pDesc->pColumnModel->capacity);
-
- // sort before flush to disk, the data must be consecutively put on tFilePage.
- if (pDesc->orderInfo.numOfCols > 0) {
- tColDataQSort(pDesc, (int32_t)pPage->num, 0, (int32_t)pPage->num - 1, pPage->data, orderType);
- }
-
-#ifdef _DEBUG_VIEW
- printf("%" PRIu64 " rows data flushed to disk after been sorted:\n", pPage->num);
- tColModelDisplay(pDesc->pColumnModel, pPage->data, pPage->num, pPage->num);
-#endif
-
- // write to cache after being sorted
- if (tExtMemBufferPut(pMemoryBuf, pPage->data, (int32_t)pPage->num) < 0) {
- tscError("failed to save data in temporary buffer");
- return -1;
- }
-
- pPage->num = 0;
- return 0;
-}
-
-int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType) {
- int32_t ret = 0;
- if ((ret = tscFlushTmpBufferImpl(pMemoryBuf, pDesc, pPage, orderType)) != 0) {
- return ret;
- }
-
- if ((ret = tExtMemBufferFlush(pMemoryBuf)) != 0) {
- return ret;
- }
-
- return 0;
-}
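
tscFlushTmpBufferImpl sorts each page in memory before it is written out, so every flushed run is internally ordered and the later merge phase only has to interleave sorted runs. A minimal sketch of that sort-then-flush discipline, with hypothetical names and stdout standing in for the external memory buffer:

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_CAP 4

/* Hypothetical in-memory page in the spirit of tFilePage. */
typedef struct { int num; int data[PAGE_CAP]; } Page;

static int cmpInt(const void *a, const void *b) {
  int x = *(const int *)a, y = *(const int *)b;
  return (x > y) - (x < y);
}

/* Sort the page so the flushed run is internally ordered, then hand it
 * over (stdout stands in for the external buffer) and reset the page. */
static void flushPage(Page *p) {
  if (p->num == 0) return;
  qsort(p->data, p->num, sizeof(int), cmpInt);
  for (int i = 0; i < p->num; ++i) printf("%d ", p->data[i]);
  printf("| ");
  p->num = 0;
}

static void append(Page *p, int v) {
  if (p->num == PAGE_CAP) flushPage(p);  // page full: flush before appending
  p->data[p->num++] = v;
}

int main() {
  Page p = {0};
  const int in[] = {9, 3, 7, 1, 8, 2};
  for (int i = 0; i < 6; ++i) append(&p, in[i]);
  flushPage(&p);  // prints: 1 3 7 9 | 2 8 |
  printf("\n");
  return 0;
}
```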
-
-int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
- int32_t numOfRows, int32_t orderType) {
- SColumnModel *pModel = pDesc->pColumnModel;
-
- if (pPage->num + numOfRows <= pModel->capacity) {
- tColModelAppend(pModel, pPage, data, 0, numOfRows, numOfRows);
- return 0;
- }
-
-  // the current buffer overflows; flush the data to the external buffer
- int32_t numOfRemainEntries = pModel->capacity - (int32_t)pPage->num;
- tColModelAppend(pModel, pPage, data, 0, numOfRemainEntries, numOfRows);
-
-  // the current buffer is full and needs to be flushed to disk
- assert(pPage->num == pModel->capacity);
- int32_t code = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType);
- if (code != 0) {
- return code;
- }
-
- int32_t remain = numOfRows - numOfRemainEntries;
-
- while (remain > 0) {
- int32_t numOfWriteElems = 0;
- if (remain > pModel->capacity) {
- numOfWriteElems = pModel->capacity;
- } else {
- numOfWriteElems = remain;
- }
-
- tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows);
-
- if (pPage->num == pModel->capacity) {
- if ((code = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType)) != TSDB_CODE_SUCCESS) {
- return code;
- }
- } else {
- pPage->num = numOfWriteElems;
- }
-
- remain -= numOfWriteElems;
- numOfRemainEntries += numOfWriteElems;
- }
-
- return 0;
-}
-
-void tscDestroyLocalMerger(SSqlObj *pSql) {
- if (pSql == NULL) {
- return;
- }
-
- SSqlRes *pRes = &(pSql->res);
- if (pRes->pLocalMerger == NULL) {
- return;
- }
-
- SSqlCmd * pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- // there is no more result, so we release all allocated resource
- SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL);
- if (pLocalMerge != NULL) {
- pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo);
-
- if (pLocalMerge->pCtx != NULL) {
- int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
- for (int32_t i = 0; i < numOfExprs; ++i) {
- SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i];
-
- tVariantDestroy(&pCtx->tag);
- tfree(pCtx->resultInfo);
-
- if (pCtx->tagInfo.pTagCtxList != NULL) {
- tfree(pCtx->tagInfo.pTagCtxList);
- }
- }
-
- tfree(pLocalMerge->pCtx);
- }
-
- tfree(pLocalMerge->prevRowOfInput);
-
- tfree(pLocalMerge->pTempBuffer);
- tfree(pLocalMerge->pResultBuf);
-
- if (pLocalMerge->pLoserTree) {
- tfree(pLocalMerge->pLoserTree->param);
- tfree(pLocalMerge->pLoserTree);
- }
-
- tfree(pLocalMerge->pFinalRes);
- tfree(pLocalMerge->discardData);
-
- tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel,
- pLocalMerge->numOfVnode);
- for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) {
- tfree(pLocalMerge->pLocalDataSrc[i]);
- }
-
- pLocalMerge->numOfBuffer = 0;
- pLocalMerge->numOfCompleted = 0;
- free(pLocalMerge);
- } else {
- tscDebug("0x%"PRIx64" already freed or another free function is invoked", pSql->self);
- }
-
- tscDebug("0x%"PRIx64" free local reducer finished", pSql->self);
-}
-
-static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, SColumnModel *pModel) {
- int32_t numOfGroupByCols = 0;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
- numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols;
- }
-
- // primary timestamp column is involved in final result
- if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
- numOfGroupByCols++;
- }
-
- int32_t *orderColIndexList = (int32_t *)calloc(numOfGroupByCols, sizeof(int32_t));
- if (orderColIndexList == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- if (numOfGroupByCols > 0) {
-
- if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
- int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
- int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
-
- // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
- for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
- orderColIndexList[i] = startCols++;
- }
-
- if (pQueryInfo->interval.interval != 0) {
-        // the first column is the timestamp; this handles queries like "interval(10m) group by tags"
- orderColIndexList[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX; //TODO ???
- }
- } else {
- /*
- * 1. the orderby ts asc/desc projection query for the super table
- * 2. interval query without groupby clause
- */
- if (pQueryInfo->interval.interval != 0) {
- orderColIndexList[0] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- } else {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
- for (int32_t i = 0; i < size; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- orderColIndexList[0] = i;
- }
- }
- }
-
- assert(pQueryInfo->order.orderColId == PRIMARYKEY_TIMESTAMP_COL_INDEX);
- }
- }
-
- *pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
- tfree(orderColIndexList);
-
- if (*pOrderDesc == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- } else {
- return TSDB_CODE_SUCCESS;
- }
-}
-
-bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- // disable merge procedure for column projection query
- int16_t functionId = pReducer->pCtx[0].functionId;
- if (pReducer->orderPrjOnSTable) {
- return true;
- }
-
- if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) {
- return false;
- }
-
- tOrderDescriptor *pOrderDesc = pReducer->pDesc;
- SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
-
- // no group by columns, all data belongs to one group
- int32_t numOfCols = orderInfo->numOfCols;
- if (numOfCols <= 0) {
- return true;
- }
-
- if (orderInfo->colIndex[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- /*
- * super table interval query
- * if the order columns is the primary timestamp, all result data belongs to one group
- */
- assert(pQueryInfo->interval.interval > 0);
- if (numOfCols == 1) {
- return true;
- }
- } else { // simple group by query
- assert(pQueryInfo->interval.interval == 0);
- }
-
- // only one row exists
- int32_t index = orderInfo->colIndex[0];
- int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
-
- int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
- return ret == 0;
-}
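
Because the group-by columns are serialized at the tail of each row, isSameGroup can detect a group boundary with a single memcmp running from the first order column's offset to the end of the row. A minimal sketch of that layout trick, using a hypothetical three-column row:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical serialized row: the group-by (tag) column is packed at the tail. */
typedef struct {
  int64_t ts;      // timestamp, not part of the group key
  double  value;   // measurement, not part of the group key
  char    tag[8];  // group-by column, serialized last
} Row;

/* Same idea as isSameGroup: one memcmp from the first group-by column's
 * offset to the end of the serialized row. */
static int sameGroup(const Row *a, const Row *b) {
  size_t off = offsetof(Row, tag);
  return memcmp((const char *)a + off, (const char *)b + off, sizeof(Row) - off) == 0;
}

int main() {
  Row r1 = {1000, 1.5, "beijing"};
  Row r2 = {2000, 2.5, "beijing"};
  Row r3 = {3000, 3.5, "tokyo"};
  printf("%d %d\n", sameGroup(&r1, &r2), sameGroup(&r1, &r3));  // prints: 1 0
  return 0;
}
```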
-
-int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
- SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SSchema * pSchema = NULL;
- SColumnModel *pModel = NULL;
- *pFinalModel = NULL;
-
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
- if (*pMemBuffer == NULL) {
- tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return pRes->code;
- }
-
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
- if (pSchema == NULL) {
- tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return pRes->code;
- }
-
- int32_t rlen = 0;
- for (int32_t i = 0; i < size; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
-
- pSchema[i].bytes = pExpr->resBytes;
- pSchema[i].type = (int8_t)pExpr->resType;
- tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name));
-
- rlen += pExpr->resBytes;
- }
-
- int32_t capacity = 0;
- if (rlen != 0) {
- capacity = nBufferSizes / rlen;
- }
-
- pModel = createColumnModel(pSchema, (int32_t)size, capacity);
-
- int32_t pg = DEFAULT_PAGE_SIZE;
- int32_t overhead = sizeof(tFilePage);
- while((pg - overhead) < pModel->rowSize * 2) {
- pg *= 2;
- }
-
- size_t numOfSubs = pSql->subState.numOfSub;
- assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups);
- for (int32_t i = 0; i < numOfSubs; ++i) {
- (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel);
- (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL;
- }
-
- if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tfree(pSchema);
- return pRes->code;
- }
-
-  // the final result depends on the number of fields
- memset(pSchema, 0, sizeof(SSchema) * size);
-
- for (int32_t i = 0; i < size; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
-
- SSchema p1 = {0};
- if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- p1 = *tGetTbnameColumnSchema();
- } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
- p1.bytes = pExpr->resBytes;
- p1.type = (uint8_t) pExpr->resType;
- tstrncpy(p1.name, pExpr->aliasName, tListLen(p1.name));
- } else {
- p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
- }
-
- int32_t inter = 0;
- int16_t type = -1;
- int16_t bytes = 0;
-
-    // the final result size and type are the same as for a query on a single table,
-    // so here, set the flag to false;
- int32_t functionId = pExpr->functionId;
- if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) {
- type = pModel->pFields[i].field.type;
- bytes = pModel->pFields[i].field.bytes;
- } else {
- if (functionId == TSDB_FUNC_FIRST_DST) {
- functionId = TSDB_FUNC_FIRST;
- } else if (functionId == TSDB_FUNC_LAST_DST) {
- functionId = TSDB_FUNC_LAST;
- } else if (functionId == TSDB_FUNC_STDDEV_DST) {
- functionId = TSDB_FUNC_STDDEV;
- }
-
- int32_t ret = getResultDataInfo(p1.type, p1.bytes, functionId, 0, &type, &bytes, &inter, 0, false);
- assert(ret == TSDB_CODE_SUCCESS);
- }
-
- pSchema[i].type = (uint8_t)type;
- pSchema[i].bytes = bytes;
- strcpy(pSchema[i].name, pModel->pFields[i].field.name);
- }
-
- *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
-
- memset(pSchema, 0, sizeof(SSchema) * size);
- size = tscNumOfFields(pQueryInfo);
-
- for(int32_t i = 0; i < size; ++i) {
- SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
- pSchema[i].bytes = pField->field.bytes;
- pSchema[i].type = pField->field.type;
- tstrncpy(pSchema[i].name, pField->field.name, tListLen(pSchema[i].name));
- }
-
- *pFFModel = createColumnModel(pSchema, (int32_t) size, capacity);
-
- tfree(pSchema);
- return TSDB_CODE_SUCCESS;
-}
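
The page-size loop inside tscLocalReducerEnvCreate keeps doubling the default page size until a page can hold at least two rows after the tFilePage header. The same computation as a standalone sketch, with hypothetical constants:

```c
#include <stdio.h>

/* Keep doubling the default page size until the page fits at least two rows
 * after the per-page header overhead; the constants used below are hypothetical. */
static int choosePageSize(int defaultPg, int overhead, int rowSize) {
  int pg = defaultPg;
  while (pg - overhead < rowSize * 2) {
    pg *= 2;
  }
  return pg;
}

int main() {
  printf("%d\n", choosePageSize(4096, 24, 3000));  // prints: 8192
  return 0;
}
```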
-
-/**
- * @param pMemBuffer
- * @param pDesc
- * @param pFinalModel
- * @param numOfVnodes
- */
-void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel *pFFModel,
- int32_t numOfVnodes) {
- destroyColumnModel(pFinalModel);
- destroyColumnModel(pFFModel);
-
- tOrderDescDestroy(pDesc);
-
- for (int32_t i = 0; i < numOfVnodes; ++i) {
- pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
- }
-
- tfree(pMemBuffer);
-}
-
-/**
- *
- * @param pLocalMerge
- * @param pOneInterDataSrc
- * @param treeList
- * @return the number of remaining input sources; if ret == 0, all data has been handled
- */
-int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
- bool *needAdjustLoserTree) {
- pOneInterDataSrc->rowIdx = 0;
- pOneInterDataSrc->pageId += 1;
-
- if ((uint32_t)pOneInterDataSrc->pageId <
- pOneInterDataSrc->pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[pOneInterDataSrc->flushoutIdx].numOfPages) {
- tExtMemBufferLoadData(pOneInterDataSrc->pMemBuffer, &(pOneInterDataSrc->filePage), pOneInterDataSrc->flushoutIdx,
- pOneInterDataSrc->pageId);
-
-#if defined(_DEBUG_VIEW)
- printf("new page load to buffer\n");
- tColModelDisplay(pOneInterDataSrc->pMemBuffer->pColumnModel, pOneInterDataSrc->filePage.data,
- pOneInterDataSrc->filePage.num, pOneInterDataSrc->pMemBuffer->pColumnModel->capacity);
-#endif
- *needAdjustLoserTree = true;
- } else {
- pLocalMerge->numOfCompleted += 1;
-
- pOneInterDataSrc->rowIdx = -1;
- pOneInterDataSrc->pageId = -1;
- *needAdjustLoserTree = true;
- }
-
- return pLocalMerge->numOfBuffer;
-}
-
-void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
- SLoserTreeInfo *pTree) {
- /*
-   * load a new data page into memory for the intermediate dataset source,
-   * since its last record in the buffer has been chosen for processing, as the winner of the loser tree
- */
- bool needToAdjust = true;
- if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
- loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
- }
-
-   * otherwise, adjust the loser tree according to the new candidate data;
-   * if the loser-tree rebuild has completed, we do not need to adjust it
- * if the loser tree is rebuild completed, we do not need to adjust
- */
- if (needToAdjust) {
- int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
-
-#ifdef _DEBUG_VIEW
- printf("before adjust:\t");
- tLoserTreeDisplay(pTree);
-#endif
-
- tLoserTreeAdjust(pTree, leafNodeIdx);
-
-#ifdef _DEBUG_VIEW
- printf("\nafter adjust:\t");
- tLoserTreeDisplay(pTree);
- printf("\n");
-#endif
- }
-}
-
-void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
-  // discard the following dataset in the same group and reset the interpolation information
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- if (pFillInfo != NULL) {
- int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
- int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
-
- taosResetFillInfo(pFillInfo, revisedSTime);
- }
-
- pLocalMerge->discard = true;
- pLocalMerge->discardData->num = 0;
-
- SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
- tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1);
-}
-
-static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) {
- assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
-
- tFilePage * pBeforeFillData = pLocalMerge->pResultBuf;
-
- pRes->data = pLocalMerge->pFinalRes;
- pRes->numOfRows = (int32_t) pBeforeFillData->num;
-
- if (pQueryInfo->limit.offset > 0) {
- if (pQueryInfo->limit.offset < pRes->numOfRows) {
- int32_t prevSize = (int32_t) pBeforeFillData->num;
- tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
-
- /* remove the hole in column model */
- tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
-
- pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
- pQueryInfo->limit.offset = 0;
- } else {
- pQueryInfo->limit.offset -= pRes->numOfRows;
- pRes->numOfRows = 0;
- }
- }
-
- if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
- pRes->numOfRows = 0;
- pBeforeFillData->num = 0;
- pLocalMerge->discard = true;
- return;
- }
-
- pRes->numOfRowsGroup += pRes->numOfRows;
-
- // impose the limitation of output rows on the final result
- if (pQueryInfo->limit.limit >= 0 && pRes->numOfRowsGroup > pQueryInfo->limit.limit) {
- int32_t prevSize = (int32_t)pBeforeFillData->num;
- int32_t overflow = (int32_t)(pRes->numOfRowsGroup - pQueryInfo->limit.limit);
- assert(overflow < pRes->numOfRows);
-
- pRes->numOfRowsGroup = pQueryInfo->limit.limit;
- pRes->numOfRows -= overflow;
- pBeforeFillData->num -= overflow;
-
- tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
-
-    // set the remaining data to be discarded, and reset the interpolation information
- savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo);
- }
-
- memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize));
-
- pRes->numOfClauseTotal += pRes->numOfRows;
- pBeforeFillData->num = 0;
-}
-
-/*
- * Note: pRes->pLocalMerger may be null, because "tscDestroyLocalMerger" is called
- * by the "interuptHandler" function in the shell
- */
-static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- tFilePage *pBeforeFillData = pLocalMerge->pResultBuf;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
-
- // todo extract function
- int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
-
- void** pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
- pResPages[i] = calloc(1, pField->bytes * pLocalMerge->resColModel->capacity);
- }
-
- while (1) {
- int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity);
-
- if (pQueryInfo->limit.offset < newRows) {
- newRows -= pQueryInfo->limit.offset;
-
- if (pQueryInfo->limit.offset > 0) {
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
- memmove(pResPages[i], ((char*)pResPages[i]) + pField->bytes * pQueryInfo->limit.offset,
- (size_t)(newRows * pField->bytes));
- }
- }
-
- pRes->data = pLocalMerge->pFinalRes;
- pRes->numOfRows = (int32_t) newRows;
-
- pQueryInfo->limit.offset = 0;
- break;
- } else {
- pQueryInfo->limit.offset -= newRows;
- pRes->numOfRows = 0;
-
- if (!taosFillHasMoreResults(pFillInfo)) {
- if (!doneOutput) { // reduce procedure has not completed yet, but current results for fill are exhausted
- break;
- }
-
-        // all output in the current group is complete
- int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity);
- if (totalRemainRows <= 0) {
- break;
- }
- }
- }
- }
-
- if (pRes->numOfRows > 0) {
- int32_t currentTotal = (int32_t)(pRes->numOfRowsGroup + pRes->numOfRows);
-
- if (pQueryInfo->limit.limit >= 0 && currentTotal > pQueryInfo->limit.limit) {
- int32_t overflow = (int32_t)(currentTotal - pQueryInfo->limit.limit);
-
- pRes->numOfRows -= overflow;
- assert(pRes->numOfRows >= 0);
-
-      /* set the remaining data to be discarded, and reset the interpolation information */
- savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo);
- }
-
- int32_t offset = 0;
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
- memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i], (size_t)(pField->bytes * pRes->numOfRows));
- offset += pField->bytes;
- }
-
- pRes->numOfRowsGroup += pRes->numOfRows;
- pRes->numOfClauseTotal += pRes->numOfRows;
- }
-
- pBeforeFillData->num = 0;
- for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- tfree(pResPages[i]);
- }
-
- tfree(pResPages);
-}
-
-static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
- SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel;
- assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1);
-
- // copy to previous temp buffer
- for (int32_t i = 0; i < pColumnModel->numOfCols; ++i) {
- SSchema *pSchema = getColumnModelSchema(pColumnModel, i);
- int16_t offset = getColumnModelOffset(pColumnModel, i);
-
- memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
- }
-
- tmpBuffer->num = 0;
- pLocalMerge->hasPrevRow = true;
-}
-
-static void doExecuteFinalMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
-  // the tag columns need to be set before any of the functions are executed
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
- for (int32_t j = 0; j < size; ++j) {
- SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j];
-
- // tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
- int32_t functionId = pCtx->functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
- tVariantDestroy(&pCtx->tag);
- char* input = pCtx->pInput;
-
- if (pCtx->inputType == TSDB_DATA_TYPE_BINARY || pCtx->inputType == TSDB_DATA_TYPE_NCHAR) {
- assert(varDataLen(input) <= pCtx->inputBytes);
- tVariantCreateFromBinary(&pCtx->tag, varDataVal(input), varDataLen(input), pCtx->inputType);
- } else {
- tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
- }
-
- } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
- pCtx->param[0].i64 = pExpr->param[0].i64;
- }
-
- pCtx->currentStage = MERGE_STAGE;
-
- if (needInit) {
- aAggs[pCtx->functionId].init(pCtx);
- }
- }
-
- for (int32_t j = 0; j < size; ++j) {
- int32_t functionId = pLocalMerge->pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]);
- }
-}
-
-static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
- if (pLocalMerge->hasUnprocessedRow) {
- pLocalMerge->hasUnprocessedRow = false;
- doExecuteFinalMerge(pCmd, pLocalMerge, true);
- savePreviousRow(pLocalMerge, tmpBuffer);
- }
-}
-
-static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) {
- int64_t maxOutput = 0;
-
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
- for (int32_t j = 0; j < size; ++j) {
- /*
-     * the ts, tag, and tagprj functions cannot decide the number of output rows of the current query;
-     * the number of output results is decided by the main output
- */
- int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) {
- continue;
- }
-
- SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
- if (maxOutput < pResInfo->numOfRes) {
- maxOutput = pResInfo->numOfRes;
- }
- }
-
- return maxOutput;
-}
-
-/*
- * in handling the top/bottom query, which produces more than one row of results,
- * the tsdb_func_tag function only fills the first row of results; the remaining rows need to be
- * filled with the same result, i.e. the tags specified in the group by clause
- *
- */
-static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) {
- int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t k = 0; k < size; ++k) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
- if (maxBufSize < pExpr->resBytes && pExpr->functionId == TSDB_FUNC_TAG) {
- maxBufSize = pExpr->resBytes;
- }
- }
-
- assert(maxBufSize >= 0);
-
- char *buf = malloc((size_t)maxBufSize);
- for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
- if (pCtx->functionId != TSDB_FUNC_TAG) {
- continue;
- }
-
-    int32_t inc = numOfRes - 1;  // the tsdb_func_tag function produces only one row of result
- memset(buf, 0, (size_t)maxBufSize);
- memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes);
-
- char* next = pCtx->pOutput;
- for (int32_t i = 0; i < inc; ++i) {
- next += pCtx->outputBytes;
- memcpy(next, buf, (size_t)pCtx->outputBytes);
- }
- }
-
- free(buf);
-}
-
-int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k];
- aAggs[pCtx->functionId].xFinalize(pCtx);
- }
-
- pLocalMerge->hasPrevRow = false;
-
- int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx);
- pLocalMerge->pResultBuf->num += numOfRes;
-
- fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge);
- return numOfRes;
-}
-
-/*
- * points merge:
- * points are merged according to the sort info, i.e. the tag columns and the timestamp column.
- * In the case of points without either tag columns or a timestamp, such as
- * results generated by a simple aggregation function, we merge them all into one point.
- * *Exception*: column projection queries require no merge procedure
- */
-bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
-  int32_t ret = 0;  // merge all results by default
-
- int16_t functionId = pLocalMerge->pCtx[0].functionId;
-
- // todo opt performance
- if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0) && pQueryInfo->distinctTag == false)) { // column projection query
- ret = 1; // disable merge procedure
- } else {
- tOrderDescriptor *pDesc = pLocalMerge->pDesc;
- if (pDesc->orderInfo.numOfCols > 0) {
- if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
- // todo refactor comparator
- ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
- } else { // desc
- ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
- }
- }
- }
-
-  /* if ret == 0, the result belongs to the same group */
- return (ret == 0);
-}
-
-static bool reachGroupResultLimit(SQueryInfo *pQueryInfo, SSqlRes *pRes) {
- return (pRes->numOfGroups >= pQueryInfo->slimit.limit && pQueryInfo->slimit.limit >= 0);
-}
-
-static bool saveGroupResultInfo(SSqlObj *pSql) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- if (pRes->numOfRowsGroup > 0) {
- pRes->numOfGroups += 1;
- }
-
- // the output group is limited by the slimit clause
- if (reachGroupResultLimit(pQueryInfo, pRes)) {
- return true;
- }
-
- // pRes->pGroupRec = realloc(pRes->pGroupRec, pRes->numOfGroups*sizeof(SResRec));
- // pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows;
- // pRes->pGroupRec[pRes->numOfGroups-1].numOfClauseTotal = pRes->numOfClauseTotal;
-
- return false;
-}
-
-
-bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) {
- bool qualified = false;
-
- for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) {
- __filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k);
- SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]};
-
- bool isnull = isNull(input, type);
- if (isnull) {
- if (fp == isNullOperator) {
- qualified = true;
- break;
- } else {
- continue;
- }
- } else {
- if (fp == notNullOperator) {
- qualified = true;
- break;
- } else if (fp == isNullOperator) {
- continue;
- }
- }
-
- if (fp(&filterElem, input, input, type)) {
- qualified = true;
- break;
- }
- }
-
- *notSkipped = qualified;
-
- return TSDB_CODE_SUCCESS;
-}
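
The NULL short-circuiting above follows a common pattern: a NULL input can only satisfy an IS NULL predicate, a non-NULL input can never satisfy one, and otherwise the filters attached to one column are OR-ed. A self-contained sketch of that evaluation order, with plain function pointers standing in for __filter_func_t and placeholder operators:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef bool (*filter_fn)(const void *val);

static bool isNullOp(const void *val)  { return val == NULL; }
static bool notNullOp(const void *val) { return val != NULL; }
static bool gtTenOp(const void *val)   { return val != NULL && *(const int *)val > 10; }

/* A value passes if ANY filter accepts it (filters on one column are OR-ed). */
static bool passAnyFilter(const void *val, filter_fn *fps, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if (val == NULL) {                /* NULL input: only IS NULL can match */
      if (fps[i] == isNullOp) return true;
      continue;
    }
    if (fps[i] == isNullOp) continue; /* non-NULL never matches IS NULL */
    if (fps[i](val)) return true;     /* notNullOp or a value predicate */
  }
  return false;
}

int main(void) {
  filter_fn fps[] = {isNullOp, gtTenOp};
  int v = 42;
  printf("%d %d\n", passAnyFilter(&v, fps, 2), passAnyFilter(NULL, fps, 2)); /* 1 1 */
  return 0;
}
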
-
-
-int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) {
- *notSkipped = true;
-
- if (pQueryInfo->havingFieldNum <= 0) {
- return TSDB_CODE_SUCCESS;
- }
-
- //int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
-
- size_t numOfOutput = tscNumOfFields(pQueryInfo);
- for(int32_t i = 0; i < numOfOutput; ++i) {
- SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
- SExprFilter* pFieldFilters = pInterField->pFieldFilters;
-
- if (pFieldFilters == NULL) {
- continue;
- }
-
- int32_t type = pInterField->field.type;
-
- char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset;
-
- doFilterFieldData(pInput, pFieldFilters, type, notSkipped);
- if (*notSkipped == false) {
- return TSDB_CODE_SUCCESS;
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-
-
-/**
- *
- * @param pSql
- * @param pLocalMerge
- * @param noMoreCurrentGroupRes
- * @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups
- */
-bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- tFilePage * pResBuf = pLocalMerge->pResultBuf;
- SColumnModel *pModel = pLocalMerge->resColModel;
-
- pRes->code = TSDB_CODE_SUCCESS;
-
- /*
- * Ignore the output of the current group since this group is skipped by the user.
- * We set numOfRows to 0 and discard the possibly remaining results.
- */
- if (pQueryInfo->slimit.offset > 0) {
- pRes->numOfRows = 0;
- pQueryInfo->slimit.offset -= 1;
- pLocalMerge->discard = !noMoreCurrentGroupRes;
-
- if (pLocalMerge->discard) {
- SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
- tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
- }
-
- return false;
- }
-
- tColModelCompact(pModel, pResBuf, pModel->capacity);
-
- if (tscIsSecondStageQuery(pQueryInfo)) {
- doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
- }
-
- bool notSkipped = true;
-
- doHavingFilter(pQueryInfo, pResBuf, ¬Skipped);
-
- if (!notSkipped) {
- pRes->numOfRows = 0;
- pLocalMerge->discard = !noMoreCurrentGroupRes;
-
- if (pLocalMerge->discard) {
- SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
- tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
- }
-
- return notSkipped;
- }
-
- // no interval query, no fill operation
- if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
- genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
- } else {
- SFillInfo* pFillInfo = pLocalMerge->pFillInfo;
- if (pFillInfo != NULL) {
- TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
-
- taosFillSetStartInfo(pFillInfo, (int32_t)pResBuf->num, ekey);
- taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
- }
-
- doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes);
- }
-
- return true;
-}
-
-void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning
- size_t t = tscSqlExprNumOfExprs(pQueryInfo);
- for (int32_t i = 0; i < t; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- pLocalMerge->pCtx[i].pOutput = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
-
- if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
- pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].pOutput;
- }
- }
-
- memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage));
-}
-
-static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) {
- // When handling data of other groups, we need to reset the interpolation information for the new group's data
- pRes->numOfRows = 0;
- pRes->numOfRowsGroup = 0;
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- pQueryInfo->limit.offset = pLocalMerge->offset;
-
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- // for group result interpolation, do not return if no data is generated
- if (pQueryInfo->fillType != TSDB_FILL_NONE) {
- TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
- int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
- taosResetFillInfo(pLocalMerge->pFillInfo, newTime);
- }
-}
-
-static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
- return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
-}
-
-static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- SLocalMerger *pLocalMerge = pRes->pLocalMerger;
- SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
-
- if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
- assert(pQueryInfo->fillType != TSDB_FILL_NONE);
-
- tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf;
- int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
-
- // the first column must be the timestamp column
- int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
- if (rows > 0) { // do fill gap
- doFillResult(pSql, pLocalMerge, false);
- }
-
- return true;
- } else {
- return false;
- }
-}
-
-static bool doHandleLastRemainData(SSqlObj *pSql) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SLocalMerger *pLocalMerge = pRes->pLocalMerger;
- SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
-
- bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow;
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
-
- if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL ||
- prevGroupCompleted) {
- // if fillType == TSDB_FILL_NONE, return directly
- if (pQueryInfo->fillType != TSDB_FILL_NONE &&
- ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
- int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
-
- int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
- if (rows > 0) {
- doFillResult(pSql, pLocalMerge, true);
- }
- }
-
- /*
- * 1. numOfRows == 0 means no interpolation results were generated.
- * 2. All local data sources are consumed, and no unprocessed rows exist.
- *
- * In that case, no results will be generated and the query is completed.
- */
- if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) {
- return true;
- }
-
- // start to process the result for a new group and save the result info of the previous group
- if (saveGroupResultInfo(pSql)) {
- return true;
- }
-
- resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
- }
-
- return false;
-}
-
-static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- SLocalMerger *pLocalMerge = pRes->pLocalMerger;
- SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t k = 0; k < size; ++k) {
- SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
- pCtx->pOutput += pCtx->outputBytes * numOfRes;
-
- // set the correct output timestamp column position
- if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) {
- pCtx->ptsOutputBuf = ((char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * numOfRes);
- }
- }
-
- doExecuteFinalMerge(pCmd, pLocalMerge, true);
-}
-
-int32_t tscDoLocalMerge(SSqlObj *pSql) {
- SSqlCmd *pCmd = &pSql->cmd;
- SSqlRes *pRes = &pSql->res;
-
- tscResetForNextRetrieve(pRes);
-
- if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed
- if (pRes->code == TSDB_CODE_SUCCESS) {
- return pRes->code;
- }
-
- tscError("0x%"PRIx64" local merge abort due to error occurs, code:%s", pSql->self, tstrerror(pRes->code));
- return pRes->code;
- }
-
- SLocalMerger *pLocalMerge = pRes->pLocalMerger;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
-
- int32_t remain = 1;
- if (tscIsTopbotQuery(pQueryInfo)) {
- remain = tscGetTopbotQueryParam(pQueryInfo);
- }
-
- if (doHandleLastRemainData(pSql)) {
- return TSDB_CODE_SUCCESS;
- }
-
- if (doBuildFilledResultForGroup(pSql)) {
- return TSDB_CODE_SUCCESS;
- }
-
- SLoserTreeInfo *pTree = pLocalMerge->pLoserTree;
-
- // clear buffer
- handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
- SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
-
- while (1) {
- if (isAllSourcesCompleted(pLocalMerge)) {
- break;
- }
-
-#ifdef _DEBUG_VIEW
- printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
-#endif
- assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
-
- // chosen from loser tree
- SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index];
-
- tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1,
- pOneDataSrc->pMemBuffer->pColumnModel->capacity);
-
-#if defined(_DEBUG_VIEW)
- printf("chosen row:\t");
- SSrcColumnInfo colInfo[256] = {0};
- tscGetSrcColumnInfo(colInfo, pQueryInfo);
-
- tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
-#endif
-
- if (pLocalMerge->discard) {
- assert(pLocalMerge->hasUnprocessedRow == false);
-
- /* the current record belongs to the same group as the previous record and needs to be discarded */
- if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) {
- tmpBuffer->num = 0;
- pOneDataSrc->rowIdx += 1;
-
- adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
-
- // all inputs are exhausted, abort current process
- if (isAllSourcesCompleted(pLocalMerge)) {
- break;
- }
-
- // data belonging to the same group needs to be discarded
- continue;
- } else {
- pLocalMerge->discard = false;
- pLocalMerge->discardData->num = 0;
-
- if (saveGroupResultInfo(pSql)) {
- return TSDB_CODE_SUCCESS;
- }
-
- resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
- }
- }
-
- if (pLocalMerge->hasPrevRow) {
- if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
- // belongs to the group of the previous row; continue processing it
- doExecuteFinalMerge(pCmd, pLocalMerge, false);
-
- // copy to buffer
- savePreviousRow(pLocalMerge, tmpBuffer);
- } else {
- /*
- * The current row does not belong to the group of the previous row,
- * so the processing of the previous group is completed.
- */
- int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge);
- bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer);
-
- tFilePage *pResBuf = pLocalMerge->pResultBuf;
-
- /*
- * if the previous group did NOT generate any result (pResBuf->num == 0),
- * continue to process results instead of returning them.
- */
- if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num + remain >= pLocalMerge->resColModel->capacity)) {
- // does not belong to the same group
- bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
-
- // this row needs to be discarded, since it belongs to the previous group
- if (pLocalMerge->discard && sameGroup) {
- pLocalMerge->hasUnprocessedRow = false;
- tmpBuffer->num = 0;
- } else { // the current row does not belong to the previous group, so it has not been handled yet.
- pLocalMerge->hasUnprocessedRow = true;
- }
-
- resetOutputBuf(pQueryInfo, pLocalMerge);
- pOneDataSrc->rowIdx += 1;
-
- // here we do not check the return value
- adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
-
- if (pRes->numOfRows == 0) {
- handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
-
- if (!sameGroup) {
- /*
- * The previous group is done; prepare for the next group.
- * If the previous group was not skipped, keep it in pRes->numOfGroups.
- */
- if (notSkipped && saveGroupResultInfo(pSql)) {
- return TSDB_CODE_SUCCESS;
- }
-
- resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
- }
- } else {
- /*
- * If the next record belongs to a new group, we do not handle it here.
- * We start the processing in a new round.
- */
- if (sameGroup) {
- handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
- }
- }
-
- // the current group has no result yet
- if (pRes->numOfRows == 0) {
- continue;
- } else {
- return TSDB_CODE_SUCCESS;
- }
- } else { // result buffer is not full
- doProcessResultInNextWindow(pSql, numOfRes);
- savePreviousRow(pLocalMerge, tmpBuffer);
- }
- }
- } else {
- doExecuteFinalMerge(pCmd, pLocalMerge, true);
- savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
- }
-
- pOneDataSrc->rowIdx += 1;
- adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
- }
-
- if (pLocalMerge->hasPrevRow) {
- finalizeRes(pQueryInfo, pLocalMerge);
- }
-
- if (pLocalMerge->pResultBuf->num) {
- genFinalResults(pSql, pLocalMerge, true);
- }
-
- return TSDB_CODE_SUCCESS;
-}
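
tscDoLocalMerge drives a classic k-way merge: the loser tree repeatedly yields the smallest head row among all local data sources, and every consumed row is replaced by the next row from the same source (adjustLoserTreeFromNewData). A minimal sketch of the same pattern, with a linear scan standing in for the loser tree and bare timestamps standing in for rows:

#include <stdint.h>
#include <stdio.h>

#define K 3

typedef struct {
  const int64_t *rows; /* sorted timestamps from one source */
  int            num;
  int            idx;  /* next unconsumed row */
} Source;

/* Index of the source holding the smallest head row, or -1 when all are
 * exhausted. A loser tree answers this in O(log K) instead of O(K). */
static int pickSmallest(const Source *src, int k) {
  int best = -1;
  for (int i = 0; i < k; ++i) {
    if (src[i].idx < src[i].num &&
        (best < 0 || src[i].rows[src[i].idx] < src[best].rows[src[best].idx])) {
      best = i;
    }
  }
  return best;
}

int main(void) {
  int64_t a[] = {1, 4, 9}, b[] = {2, 3, 8}, c[] = {5, 6, 7};
  Source  src[K] = {{a, 3, 0}, {b, 3, 0}, {c, 3, 0}};

  int i;
  while ((i = pickSmallest(src, K)) >= 0) {
    printf("%lld ", (long long)src[i].rows[src[i].idx]); /* merged: 1 2 ... 9 */
    src[i].idx++;                                        /* advance that source */
  }
  putchar('\n');
  return 0;
}
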
-
-void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) {
- SSqlRes *pRes = &pObj->res;
- if (pRes->pLocalMerger != NULL) {
- tscDestroyLocalMerger(pObj);
- }
-
- pRes->qId = 1; // hack to pass the safety check in fetch_row function
- pRes->numOfRows = 0;
- pRes->row = 0;
-
- pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
- pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
-
- /*
- * we need one additional byte of space:
- * sprintf needs the extra byte to put the terminating '\0' at the end of the string
- */
- size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1;
- pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize);
-
- pRes->pLocalMerger->pResultBuf->num = numOfRes;
- pRes->data = pRes->pLocalMerger->pResultBuf->data;
-}
-
-int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
- int32_t maxRowSize = MAX(rowSize, finalRowSize);
- char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
-
- size_t size = tscNumOfFields(pQueryInfo);
- SArithmeticSupport arithSup = {0};
-
- // todo refactor
- arithSup.offset = 0;
- arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
- arithSup.exprList = pQueryInfo->exprList;
- arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
-
- for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
- arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset);
- }
-
- int32_t offset = 0;
-
- for (int i = 0; i < size; ++i) {
- SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
-
- // calculate the result from several other columns
- if (pSup->pArithExprInfo != NULL) {
- arithSup.pArithExpr = pSup->pArithExprInfo;
- arithmeticTreeTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
- } else {
- SSqlExpr* pExpr = pSup->pSqlExpr;
- memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, (size_t)(pExpr->resBytes * pOutput->num));
- }
-
- offset += pSup->field.bytes;
- }
-
- memcpy(pOutput->data, pbuf, (size_t)(pOutput->num * offset));
-
- tfree(pbuf);
- tfree(arithSup.data);
-
- return offset;
-}
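
doArithmeticCalculate addresses the page column-major: all values of column k are contiguous, starting at data + num * offset(k), which is why every input pointer above is computed as pOutput->data + pOutput->num * pExpr->offset. A small sketch of that addressing, assuming fixed-width columns:

#include <stdio.h>
#include <stdlib.h>

/* Column-major page: all values of column 0, then all of column 1, ... */
static char *colStart(char *data, int numRows, const int *colBytes, int col) {
  int offset = 0;
  for (int c = 0; c < col; ++c) offset += colBytes[c]; /* sum of preceding widths */
  return data + (size_t)numRows * offset;
}

int main(void) {
  enum { ROWS = 4 };
  int   colBytes[2] = {sizeof(int), sizeof(double)};
  char *page = malloc(ROWS * (sizeof(int) + sizeof(double))); /* malloc is suitably aligned */

  int    *c0 = (int *)colStart(page, ROWS, colBytes, 0);
  double *c1 = (double *)colStart(page, ROWS, colBytes, 1);
  for (int r = 0; r < ROWS; ++r) { c0[r] = r; c1[r] = r * 0.5; }

  printf("row 2: %d %.1f\n", c0[2], c1[2]); /* prints: row 2: 2 1.0 */
  free(page);
  return 0;
}
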
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 114b74ae0394f34c207df46489ca45824fa2adc5..fdcfcb02de470a23afd552be81292c31572fcc55 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -23,7 +23,7 @@
#include "ttype.h"
#include "hash.h"
#include "tscUtil.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
#include "tsclient.h"
#include "ttokendef.h"
#include "taosdef.h"
@@ -38,19 +38,57 @@ enum {
TSDB_USE_CLI_TS = 1,
};
-static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
-static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema, char* str, char** end);
+static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
+static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
+ char *str, char **end);
+int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols,
+ int32_t allNullLen) {
+ ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols));
+ if (nRows > 0) {
+ // already initialized (binding multiple rows by a single column)
+ if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
-static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) {
- errno = 0;
- *value = strtold(pToken->z, endPtr);
-
- // not a valid integer number, return error
- if ((*endPtr - pToken->z) != pToken->n) {
- return TK_ILLEGAL;
+ // default compareStat is ROW_COMPARE_NO_NEED
+ if (nBoundCols == 0) { // file input
+ pBuilder->memRowType = SMEM_ROW_DATA;
+ return TSDB_CODE_SUCCESS;
+ } else {
+ float boundRatio = ((float)nBoundCols / (float)nCols);
+
+ if (boundRatio < KVRatioKV) {
+ pBuilder->memRowType = SMEM_ROW_KV;
+ return TSDB_CODE_SUCCESS;
+ } else if (boundRatio > KVRatioData) {
+ pBuilder->memRowType = SMEM_ROW_DATA;
+ return TSDB_CODE_SUCCESS;
+ }
+ pBuilder->compareStat = ROW_COMPARE_NEED;
+
+ if (boundRatio < KVRatioPredict) {
+ pBuilder->memRowType = SMEM_ROW_KV;
+ } else {
+ pBuilder->memRowType = SMEM_ROW_DATA;
+ }
}
- return pToken->type;
+ pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
+
+ if (nRows > 0) {
+ pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo));
+ if (pBuilder->rowInfo == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ for (int i = 0; i < nRows; ++i) {
+ (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
+ (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
}
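
initMemRowBuilder picks the sparse KV row when few columns are bound and the dense data row when almost all are, and defers the choice (compareStat = ROW_COMPARE_NEED) when the bound/total ratio falls between the thresholds. A sketch of that decision with illustrative cut-offs of 0.4 and 0.9; the real KVRatioKV/KVRatioData/KVRatioPredict constants are defined elsewhere in the tree and may differ:

#include <stdio.h>

typedef enum { ROW_KV, ROW_DATA, ROW_UNDECIDED } RowType;

static RowType pickRowType(int nBoundCols, int nCols) {
  if (nBoundCols == 0) return ROW_DATA;  /* file import: all columns present */
  float ratio = (float)nBoundCols / (float)nCols;
  if (ratio < 0.4f) return ROW_KV;       /* few columns bound: sparse KV row wins */
  if (ratio > 0.9f) return ROW_DATA;     /* nearly all bound: dense data row wins */
  return ROW_UNDECIDED;                  /* in between: compare both lengths per row */
}

int main(void) {
  /* prints: 0 1 2 (KV, DATA, UNDECIDED) */
  printf("%d %d %d\n", pickRowType(2, 10), pickRowType(10, 10), pickRowType(6, 10));
  return 0;
}
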
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
@@ -60,25 +98,23 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
int64_t useconds = 0;
char * pTokenEnd = *next;
- index = 0;
-
if (pToken->type == TK_NOW) {
useconds = taosGetTimestamp(timePrec);
} else if (strncmp(pToken->z, "0", 1) == 0 && pToken->n == 1) {
// do nothing
} else if (pToken->type == TK_INTEGER) {
- useconds = tsosStr2int64(pToken->z);
+ useconds = taosStr2int64(pToken->z);
} else {
// strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z);
+ return tscInvalidOperationMsg(error, "invalid timestamp format", pToken->z);
}
return TSDB_CODE_SUCCESS;
}
for (int k = pToken->n; pToken->z[k] != '\0'; k++) {
- if (pToken->z[k] == ' ' || pToken->z[k] == '\t') continue;
+ if (isspace(pToken->z[k])) continue;
if (pToken->z[k] == ',') {
*next = pTokenEnd;
*time = useconds;
@@ -103,15 +139,12 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
pTokenEnd += index;
if (valueToken.n < 2) {
- return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
- }
-
- if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
}
- if (timePrec == TSDB_TIME_PRECISION_MILLI) {
- interval /= 1000;
+ char unit = 0;
+ if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, &unit, timePrec) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (sToken.type == TK_PLUS) {
@@ -127,10 +160,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return TSDB_CODE_SUCCESS;
}
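
tsParseTime accepts relative expressions such as now + 1h or <ts> - 10s: the duration token is converted to the timestamp precision and then added to or subtracted from the base value. A simplified standalone sketch of that offset arithmetic, fixed to millisecond precision (the real parseAbsoluteDuration also handles other units and the us/ns precisions):

#include <stdint.h>
#include <stdio.h>

/* Apply "<base> +/- <n><unit>", everything in milliseconds. */
static int64_t applyOffset(int64_t baseMs, char sign, int64_t n, char unit) {
  int64_t ms;
  switch (unit) {
    case 's': ms = n * 1000;           break;
    case 'm': ms = n * 60 * 1000;      break;
    case 'h': ms = n * 3600 * 1000;    break;
    case 'd': ms = n * 86400LL * 1000; break;
    default:  ms = n;                  break; /* bare number: already ms */
  }
  return (sign == '-') ? baseMs - ms : baseMs + ms;
}

int main(void) {
  int64_t now = 1700000000000LL; /* some epoch-ms value */
  printf("%lld\n", (long long)applyOffset(now, '+', 1, 'h'));  /* now + 1h  */
  printf("%lld\n", (long long)applyOffset(now, '-', 10, 's')); /* now - 10s */
  return 0;
}
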
-static bool isNullStr(SStrToken* pToken) {
- return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) &&
- (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0));
-}
int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey,
int16_t timePrec) {
int64_t iv;
@@ -138,7 +167,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
char *endptr = NULL;
if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
- return tscInvalidSQLErrMsg(msg, "invalid numeric data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z);
}
switch (pSchema->type) {
@@ -161,7 +190,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
double dv = strtod(pToken->z, NULL);
*(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE);
} else {
- return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
}
}
break;
@@ -173,9 +202,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid tinyint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z);
} else if (!IS_VALID_TINYINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "data overflow", pToken->z);
}
*((uint8_t *)payload) = (uint8_t)iv;
@@ -189,9 +218,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid unsigned tinyint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z);
} else if (!IS_VALID_UTINYINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "unsigned tinyint data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z);
}
*((uint8_t *)payload) = (uint8_t)iv;
@@ -205,9 +234,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid smallint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z);
} else if (!IS_VALID_SMALLINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "smallint data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z);
}
*((int16_t *)payload) = (int16_t)iv;
@@ -221,9 +250,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid unsigned smallint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z);
} else if (!IS_VALID_USMALLINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "unsigned smallint data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z);
}
*((uint16_t *)payload) = (uint16_t)iv;
@@ -237,9 +266,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid int data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid int data", pToken->z);
} else if (!IS_VALID_INT(iv)) {
- return tscInvalidSQLErrMsg(msg, "int data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "int data overflow", pToken->z);
}
*((int32_t *)payload) = (int32_t)iv;
@@ -253,9 +282,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid unsigned int data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z);
} else if (!IS_VALID_UINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "unsigned int data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z);
}
*((uint32_t *)payload) = (uint32_t)iv;
@@ -269,9 +298,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid bigint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z);
} else if (!IS_VALID_BIGINT(iv)) {
- return tscInvalidSQLErrMsg(msg, "bigint data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
}
*((int64_t *)payload) = iv;
@@ -284,9 +313,9 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid unsigned bigint data", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z);
} else if (!IS_VALID_UBIGINT((uint64_t)iv)) {
- return tscInvalidSQLErrMsg(msg, "unsigned bigint data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z);
}
*((uint64_t *)payload) = iv;
@@ -299,11 +328,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
- return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
+ return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || isnan(dv)) {
- return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
+ return tscInvalidOperationMsg(msg, "illegal float data", pToken->z);
}
// *((float *)payload) = (float)dv;
@@ -317,11 +346,11 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
- return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
+ return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) {
- return tscInvalidSQLErrMsg(msg, "illegal double data", pToken->z);
+ return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
}
*((double *)payload) = dv;
@@ -334,7 +363,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
setVardataNull(payload, TSDB_DATA_TYPE_BINARY);
} else { // too-long values return an invalid-SQL error and are not truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { //todo refactor
- return tscInvalidSQLErrMsg(msg, "string data overflow", pToken->z);
+ return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
}
STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
@@ -351,7 +380,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
char buf[512] = {0};
snprintf(buf, tListLen(buf), "%s", strerror(errno));
- return tscInvalidSQLErrMsg(msg, buf, pToken->z);
+ return tscInvalidOperationMsg(msg, buf, pToken->z);
}
varDataSetLen(payload, output);
@@ -368,7 +397,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} else {
int64_t temp;
if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(msg, "invalid timestamp", pToken->z);
+ return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
}
*((int64_t *)payload) = temp;
@@ -385,7 +414,7 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
 * The server time and the client time should not be mixed up in one sql string.
 * Do not employ a sort operation if server time is used.
*/
-static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
+int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) {
// once the data block is disordered, we do NOT keep the previous timestamp any more
if (!pDataBlocks->ordered) {
return TSDB_CODE_SUCCESS;
@@ -410,38 +439,49 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
if (k <= pDataBlocks->prevTS && (pDataBlocks->tsSource == TSDB_USE_CLI_TS)) {
pDataBlocks->ordered = false;
+ tscWarn("input timestamp is not ordered");
}
pDataBlocks->prevTS = k;
return TSDB_CODE_SUCCESS;
}
-int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int16_t timePrec, int32_t *len,
- char *tmpTokenBuf) {
- int32_t index = 0;
- SStrToken sToken = {0};
- char *payload = pDataBlocks->pData + pDataBlocks->size;
+int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf,
+ SInsertStatementParam *pInsertParam) {
+ int32_t index = 0;
+ SStrToken sToken = {0};
+
+ char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header
SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo;
- SSchema *schema = tscGetTableSchema(pDataBlocks->pTableMeta);
+ STableMeta * pTableMeta = pDataBlocks->pTableMeta;
+ SSchema * schema = tscGetTableSchema(pTableMeta);
+ SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder;
+ int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE;
+ int32_t kvLen = pBuilder->kvRowInitLen;
+ bool isParseBindParam = false;
+
+ initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound);
// 1. set the parsed value from sql string
- int32_t rowSize = 0;
for (int i = 0; i < spd->numOfBound; ++i) {
// the start position in data block buffer of current value in sql
int32_t colIndex = spd->boundedColumns[i];
- char *start = payload + spd->cols[colIndex].offset;
- SSchema *pSchema = &schema[colIndex];
- rowSize += pSchema->bytes;
+ char *start = row + spd->cols[colIndex].offset;
+
+ SSchema *pSchema = &schema[colIndex]; // get colId here
index = 0;
sToken = tStrGetToken(*str, &index, true);
*str += index;
if (sToken.type == TK_QUESTION) {
- if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
- return tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
+ if (!isParseBindParam) {
+ isParseBindParam = true;
+ }
+ if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str);
}
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
@@ -449,14 +489,15 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
continue;
}
- strcpy(pCmd->payload, "client out of memory");
+ strcpy(pInsertParam->msg, "client out of memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
- type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
- return tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
+ type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) ||
+ (sToken.n == 0) || (type == TK_RP)) {
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z);
}
// Remove quotation marks
@@ -467,12 +508,12 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
int32_t cnt = 0;
int32_t j = 0;
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
- return tscSQLSyntaxErrMsg(pCmd->payload, "too long string", sToken.z);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
}
-
+
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
- tmpTokenBuf[j] = sToken.z[k + 1];
+ tmpTokenBuf[j] = sToken.z[k + 1];
cnt++;
j++;
@@ -490,41 +531,44 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, SSqlCmd *pCmd, int1
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
- int32_t ret = tsParseOneColumn(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
+ int32_t toffset = -1;
+ int16_t colId = -1;
+ tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId);
+
+ int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset,
+ colId, &dataLen, &kvLen, pBuilder->compareStat);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
- tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
- return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+ if (isPrimaryKey) {
+ TSKEY tsKey = memRowKey(row);
+ if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) {
+ tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z);
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+ }
}
}
- // 2. set the null value for the columns that do not assign values
- if (spd->numOfBound < spd->numOfCols) {
- char *ptr = payload;
-
- for (int32_t i = 0; i < spd->numOfCols; ++i) {
- if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null
- if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
- varDataSetLen(ptr, sizeof(int8_t));
- *(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
- } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) {
- varDataSetLen(ptr, sizeof(int32_t));
- *(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL;
- } else {
- setNull(ptr, schema[i].type, schema[i].bytes);
+ if (!isParseBindParam) {
+ // 2. check and set convert flag
+ if (pBuilder->compareStat == ROW_COMPARE_NEED) {
+ checkAndConvertMemRow(row, dataLen, kvLen);
+ }
+
+ // 3. set the null value for the columns that are not assigned values
+ if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) {
+ SDataRow dataRow = memRowDataBody(row);
+ for (int32_t i = 0; i < spd->numOfCols; ++i) {
+ if (spd->cols[i].valStat == VAL_STAT_NONE) {
+ tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset);
}
}
-
- ptr += schema[i].bytes;
}
-
- rowSize = (int32_t)(ptr - payload);
}
- *len = rowSize;
+ *len = getExtendedRowSize(pDataBlocks);
+
return TSDB_CODE_SUCCESS;
}
@@ -538,8 +582,30 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
return left > right ? 1 : -1;
}
}
+int32_t schemaIdxCompar(const void *lhs, const void *rhs) {
+ uint16_t left = *(uint16_t *)lhs;
+ uint16_t right = *(uint16_t *)rhs;
+
+ if (left == right) {
+ return 0;
+ } else {
+ return left > right ? 1 : -1;
+ }
+}
+
+int32_t boundIdxCompar(const void *lhs, const void *rhs) {
+ uint16_t left = *(uint16_t *)POINTER_SHIFT(lhs, sizeof(uint16_t));
+ uint16_t right = *(uint16_t *)POINTER_SHIFT(rhs, sizeof(uint16_t));
-int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSqlCmd* pCmd, int32_t* numOfRows, char *tmpTokenBuf) {
+ if (left == right) {
+ return 0;
+ } else {
+ return left > right ? 1 : -1;
+ }
+}
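
schemaIdxCompar and boundIdxCompar support a small permutation trick used below in parseBoundColumns when columns are bound out of schema order: sort the (schemaColIdx, boundIdx) pairs by schema index, stamp each entry with its rank as finalIdx, then sort back by bind position. A self-contained demonstration; the struct mirrors what SBoundIdxInfo is assumed to look like, and the comparators are written against named fields instead of raw byte offsets:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  uint16_t schemaColIdx; /* position of the column in the table schema */
  uint16_t boundIdx;     /* position of the column in the bind list    */
  uint16_t finalIdx;     /* rank of this column in schema order        */
} BoundIdxInfo;

static int bySchema(const void *l, const void *r) {
  uint16_t a = ((const BoundIdxInfo *)l)->schemaColIdx;
  uint16_t b = ((const BoundIdxInfo *)r)->schemaColIdx;
  return (a > b) - (a < b);
}

static int byBound(const void *l, const void *r) {
  uint16_t a = ((const BoundIdxInfo *)l)->boundIdx;
  uint16_t b = ((const BoundIdxInfo *)r)->boundIdx;
  return (a > b) - (a < b);
}

int main(void) {
  /* columns bound out of schema order: (c3, c1, c2) */
  BoundIdxInfo idx[3] = {{3, 0, 0}, {1, 1, 0}, {2, 2, 0}};

  qsort(idx, 3, sizeof(idx[0]), bySchema);          /* schema order: c1 c2 c3 */
  for (uint16_t i = 0; i < 3; ++i) idx[i].finalIdx = i;
  qsort(idx, 3, sizeof(idx[0]), byBound);           /* back to bind order */

  for (int i = 0; i < 3; ++i) printf("%u ", (unsigned)idx[i].finalIdx); /* 2 0 1 */
  putchar('\n');
  return 0;
}
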
+
+int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SInsertStatementParam *pInsertParam,
+ int32_t* numOfRows, char *tmpTokenBuf) {
int32_t index = 0;
int32_t code = 0;
@@ -552,26 +618,33 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
int32_t precision = tinfo.precision;
+ int32_t extendedRowSize = getExtendedRowSize(pDataBlock);
+
+ if (TSDB_CODE_SUCCESS !=
+ (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound,
+ pDataBlock->boundColumnInfo.allNullLen))) {
+ return code;
+ }
while (1) {
index = 0;
sToken = tStrGetToken(*str, &index, false);
if (sToken.n == 0 || sToken.type != TK_LP) break;
*str += index;
- if ((*numOfRows) >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
+ if ((*numOfRows) >= maxRows || pDataBlock->size + extendedRowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
- code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
+ code = tscAllocateMemIfNeed(pDataBlock, extendedRowSize, &tSize);
if (code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
- strcpy(pCmd->payload, "client out of memory");
+ strcpy(pInsertParam->msg, "client out of memory");
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- ASSERT(tSize > maxRows);
+ ASSERT(tSize >= maxRows);
maxRows = tSize;
}
int32_t len = 0;
- code = tsParseOneRow(str, pDataBlock, pCmd, precision, &len, tmpTokenBuf);
+ code = tsParseOneRow(str, pDataBlock, precision, &len, tmpTokenBuf, pInsertParam);
if (code != TSDB_CODE_SUCCESS) { // error message has been set in tsParseOneRow, return directly
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
@@ -580,16 +653,17 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
index = 0;
sToken = tStrGetToken(*str, &index, false);
- *str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
- return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str);
}
+
+ *str += index;
(*numOfRows)++;
}
if ((*numOfRows) <= 0) {
- strcpy(pCmd->payload, "no any data points");
+ strcpy(pInsertParam->msg, "no data points");
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
} else {
return TSDB_CODE_SUCCESS;
@@ -599,18 +673,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) {
pColInfo->numOfCols = numOfCols;
pColInfo->numOfBound = numOfCols;
-
+ pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode
pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t));
pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn));
+ pColInfo->colIdxInfo = NULL;
+ pColInfo->flen = 0;
+ pColInfo->allNullLen = 0;
+ int32_t nVar = 0;
for (int32_t i = 0; i < pColInfo->numOfCols; ++i) {
+ uint8_t type = pSchema[i].type;
if (i > 0) {
pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset;
+ pColInfo->cols[i].toffset = pColInfo->flen;
+ }
+ pColInfo->flen += TYPE_BYTES[type];
+ switch (type) {
+ case TSDB_DATA_TYPE_BINARY:
+ pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
+ ++nVar;
+ break;
+ case TSDB_DATA_TYPE_NCHAR:
+ pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
+ ++nVar;
+ break;
+ default:
+ break;
}
-
- pColInfo->cols[i].hasVal = true;
pColInfo->boundedColumns[i] = i;
}
+ pColInfo->allNullLen += pColInfo->flen;
+ pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT));
}
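
tscSetBoundColumnInfo precomputes allNullLen, the serialized length of a row in which every column is NULL: the fixed part flen accumulates one slot per column, and each BINARY/NCHAR column additionally contributes a var header plus one (wide) character. A worked example with assumed sizes (VARSTR_HEADER_SIZE = 2, CHAR_BYTES = 1, TSDB_NCHAR_SIZE = 4, and 4-byte offset slots for variable-length columns):

#include <stdio.h>

enum { VARSTR_HEADER = 2, CHAR_BYTES = 1, NCHAR_BYTES = 4, OFFSET_BYTES = 4 };

int main(void) {
  /* schema: ts BIGINT(8), v INT(4), tag BINARY, name NCHAR */
  int flen    = 8 + 4 + OFFSET_BYTES + OFFSET_BYTES;  /* fixed slots: 20 */
  int varNull = (VARSTR_HEADER + CHAR_BYTES)          /* NULL binary: 3 */
              + (VARSTR_HEADER + NCHAR_BYTES);        /* NULL nchar:  6 */
  printf("allNullLen = %d\n", flen + varNull);        /* 20 + 9 = 29 */
  return 0;
}
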
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) {
@@ -641,13 +734,13 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
return TSDB_CODE_SUCCESS;
}
-static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
+int32_t FORCE_INLINE tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
pBlocks->tid = pTableMeta->id.tid;
pBlocks->uid = pTableMeta->id.uid;
pBlocks->sversion = pTableMeta->sversion;
if (pBlocks->numOfRows + numOfRows >= INT16_MAX) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
pBlocks->numOfRows += numOfRows;
return TSDB_CODE_SUCCESS;
@@ -655,7 +748,7 @@ static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta,
}
// data block is disordered, sort it in ascending order
-void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
+void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData;
// size is less than the total size, since duplicated rows may have been removed
@@ -695,22 +788,92 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) {
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
+
+ dataBuf->prevTS = INT64_MIN;
}
-static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
- STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta);
-
+// data block is disordered, sort it in ascending order
+int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlkKeyInfo) {
+ SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData;
+ int16_t nRows = pBlocks->numOfRows;
+
+ // size is less than the total size, since duplicated rows may have been removed
+
+ // if server time is used, this block must be ordered
+ if (dataBuf->tsSource == TSDB_USE_SERVER_TS) {
+ assert(dataBuf->ordered);
+ }
+ // allocate memory
+ size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
+ if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
+ char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc);
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
+ pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc;
+ }
+ memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);
+
+ int32_t extendedRowSize = getExtendedRowSize(dataBuf);
+ SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
+ char * pBlockData = pBlocks->data;
+ int n = 0;
+ while (n < nRows) {
+ pBlkKeyTuple->skey = memRowKey(pBlockData);
+ pBlkKeyTuple->payloadAddr = pBlockData;
+
+ // next loop
+ pBlockData += extendedRowSize;
+ ++pBlkKeyTuple;
+ ++n;
+ }
+
+ if (!dataBuf->ordered) {
+ pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
+ qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
+
+ pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
+ int32_t i = 0;
+ int32_t j = 1;
+ while (j < nRows) {
+ TSKEY ti = (pBlkKeyTuple + i)->skey;
+ TSKEY tj = (pBlkKeyTuple + j)->skey;
+
+ if (ti == tj) {
+ ++j;
+ continue;
+ }
+
+ int32_t nextPos = (++i);
+ if (nextPos != j) {
+ memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
+ }
+ ++j;
+ }
+
+ dataBuf->ordered = true;
+ pBlocks->numOfRows = i + 1;
+ }
+
+ dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
+ dataBuf->prevTS = INT64_MIN;
+
+ return 0;
+}
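
The new tscSortRemoveDataBlockDupRows sorts only the lightweight (skey, payload pointer) tuples and then drops duplicate timestamps with a two-pointer pass, keeping the first row of each run. The same sort-then-compact idea on bare keys, as a self-contained sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef int64_t TSKEY;

static int keyCompar(const void *l, const void *r) {
  TSKEY a = *(const TSKEY *)l, b = *(const TSKEY *)r;
  return (a > b) - (a < b);
}

/* Sort keys ascending, then compact duplicates in place; returns the new count. */
static int sortDedup(TSKEY *keys, int n) {
  if (n <= 1) return n;
  qsort(keys, n, sizeof(TSKEY), keyCompar);
  int i = 0;
  for (int j = 1; j < n; ++j) {
    if (keys[j] != keys[i]) keys[++i] = keys[j]; /* keep the first of each run */
  }
  return i + 1;
}

int main(void) {
  TSKEY k[] = {5, 1, 5, 3, 1, 9};
  int   n   = sortDedup(k, 6);
  for (int i = 0; i < n; ++i) printf("%lld ", (long long)k[i]); /* 1 3 5 9 */
  putchar('\n');
  return 0;
}
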
+
+static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) {
int32_t maxNumOfRows;
- int32_t code = tscAllocateMemIfNeed(dataBuf, tinfo.rowSize, &maxNumOfRows);
+ int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(dataBuf), &maxNumOfRows);
if (TSDB_CODE_SUCCESS != code) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- code = TSDB_CODE_TSC_INVALID_SQL;
+ code = TSDB_CODE_TSC_INVALID_OPERATION;
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for removing escape characters: \\, \', \"
int32_t numOfRows = 0;
- code = tsParseValues(str, dataBuf, maxNumOfRows, pCmd, &numOfRows, tmpTokenBuf);
+ code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -718,7 +881,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {
- param->idx = pCmd->numOfParams++;
+ param->idx = pInsertParam->numOfParams++;
param->offset -= sizeof(SSubmitBlk);
}
}
@@ -726,7 +889,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, STableDataBlock
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
code = tsSetBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
- tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
+ tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", *str);
return code;
}
@@ -745,12 +908,11 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
const int32_t STABLE_INDEX = 1;
SSqlCmd * pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
+ SInsertStatementParam* pInsertParam = &pCmd->insertParam;
char *sql = *sqlstr;
- pSql->cmd.autoCreated = false;
-
// get the token of specified table
index = 0;
tableToken = tStrGetToken(sql, &index, false);
@@ -788,7 +950,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
if (numOfColList == 0 && (*boundColumn) != NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
@@ -804,13 +966,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
STableMetaInfo *pSTableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
- code = tscSetTableFullName(pSTableMetaInfo, &sToken, pSql);
+ code = tscSetTableFullName(&pSTableMetaInfo->name, &sToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
- pCmd->tagData.dataLen = 0;
+ tNameExtractFullName(&pSTableMetaInfo->name, pInsertParam->tagData.name);
+ pInsertParam->tagData.dataLen = 0;
code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
@@ -818,7 +980,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
- return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
+ return tscInvalidOperationMsg(pInsertParam->msg, "create table only from super table is allowed", sToken.z);
}
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
@@ -830,7 +992,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
index = 0;
sToken = tStrGetToken(sql, &index, false);
if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
- return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
+ tscDestroyBoundColumnInfo(&spd);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword TAGS expected", sToken.z);
}
// parse the bound tags column
@@ -840,8 +1003,9 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn);
*/
char* end = NULL;
- code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
+ code = parseBoundColumns(pInsertParam, &spd, pTagSchema, sql, &end);
if (code != TSDB_CODE_SUCCESS) {
+ tscDestroyBoundColumnInfo(&spd);
return code;
}
@@ -859,11 +1023,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sql += index;
if (sToken.type != TK_LP) {
- return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
+ tscDestroyBoundColumnInfo(&spd);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
}
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
+ tscDestroyBoundColumnInfo(&spd);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -876,7 +1042,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if (TK_ILLEGAL == sToken.type) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return TSDB_CODE_TSC_INVALID_SQL;
+ tscDestroyBoundColumnInfo(&spd);
+ return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
if (sToken.n == 0 || sToken.type == TK_RP) {
@@ -890,9 +1057,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
char tagVal[TSDB_MAX_TAGS_LEN];
- code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
+ code = tsParseOneColumn(pSchema, &sToken, tagVal, pInsertParam->msg, &sql, false, tinfo.precision);
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
+ tscDestroyBoundColumnInfo(&spd);
return code;
}
@@ -904,29 +1072,29 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) {
- return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
}
tdSortKVRowByColIdx(row);
- pCmd->tagData.dataLen = kvRowLen(row);
- if (pCmd->tagData.dataLen <= 0){
- return tscInvalidSQLErrMsg(pCmd->payload, "tag value expected", NULL);
+ pInsertParam->tagData.dataLen = kvRowLen(row);
+ if (pInsertParam->tagData.dataLen <= 0){
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
}
- char* pTag = realloc(pCmd->tagData.data, pCmd->tagData.dataLen);
+ char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen);
if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
kvRowCpy(pTag, row);
free(row);
- pCmd->tagData.data = pTag;
+ pInsertParam->tagData.data = pTag;
index = 0;
sToken = tStrGetToken(sql, &index, false);
sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
- return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", sToken.z);
}
/* parse columns after super table tags values.
@@ -939,7 +1107,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
int numOfColsAfterTags = 0;
if (sToken.type == TK_LP) {
if (*boundColumn != NULL) {
- return tscSQLSyntaxErrMsg(pCmd->payload, "bind columns again", sToken.z);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "bind columns again", sToken.z);
} else {
*boundColumn = &sToken.z[0];
}
@@ -961,7 +1129,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
}
if (numOfColsAfterTags == 0 && (*boundColumn) != NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
sToken = tStrGetToken(sql, &index, false);
@@ -970,38 +1138,36 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
sql = sToken.z;
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
+ return tscInvalidOperationMsg(pInsertParam->msg, "invalid table name", *sqlstr);
}
- int32_t ret = tscSetTableFullName(pTableMetaInfo, &tableToken, pSql);
+ int32_t ret = tscSetTableFullName(&pTableMetaInfo->name, &tableToken, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
if (sql == NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
- code = tscGetTableMetaEx(pSql, pTableMetaInfo, true);
+ code = tscGetTableMetaEx(pSql, pTableMetaInfo, true, false);
if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) {
return code;
}
} else {
- sql = sToken.z;
-
- if (sql == NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (sToken.z == NULL) {
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "", sql);
}
- code = tscGetTableMetaEx(pSql, pTableMetaInfo, false);
- if (pCmd->curSql == NULL) {
+ sql = sToken.z;
+ code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, false);
+ if (pInsertParam->sql == NULL) {
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS);
}
}
*sqlstr = sql;
-
return code;
}
@@ -1015,22 +1181,28 @@ int validateTableName(char *tblName, int len, SStrToken* psTblToken) {
return tscValidateName(psTblToken);
}
-static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
- if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) {
- return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sql);
+static int32_t validateDataSource(SInsertStatementParam *pInsertParam, int32_t type, const char *sql) {
+ uint32_t *insertType = &pInsertParam->insertType;
+ if (*insertType == TSDB_QUERY_TYPE_STMT_INSERT && type == TSDB_QUERY_TYPE_INSERT) {
+ return TSDB_CODE_SUCCESS;
}
- pCmd->dataSourceType = type;
+ if ((*insertType) != 0 && (*insertType) != type) {
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES and FILE are not allowed to be mixed up", sql);
+ }
+
+ *insertType = type;
return TSDB_CODE_SUCCESS;
}
-static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SSchema* pSchema,
+static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo* pColInfo, SSchema* pSchema,
char* str, char **end) {
- pColInfo->numOfBound = 0;
+ int32_t nCols = pColInfo->numOfCols;
- memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * pColInfo->numOfCols);
- for(int32_t i = 0; i < pColInfo->numOfCols; ++i) {
- pColInfo->cols[i].hasVal = false;
+ pColInfo->numOfBound = 0;
+ memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols);
+ for (int32_t i = 0; i < nCols; ++i) {
+ pColInfo->cols[i].valStat = VAL_STAT_NONE;
}
int32_t code = TSDB_CODE_SUCCESS;
@@ -1040,10 +1212,12 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
str += index;
if (sToken.type != TK_LP) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
+ code = tscSQLSyntaxErrMsg(pInsertParam->msg, "( is expected", sToken.z);
goto _clean;
}
+ bool isOrdered = true;
+ int32_t lastColIdx = -1; // last column found
while (1) {
index = 0;
sToken = tStrGetToken(str, &index, false);
@@ -1064,36 +1238,102 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
bool findColumnIndex = false;
// todo speedup by using hash list
- for (int32_t t = 0; t < pColInfo->numOfCols; ++t) {
+ int32_t nScanned = 0, t = lastColIdx + 1;
+ while (t < nCols) {
if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
- if (pColInfo->cols[t].hasVal == true) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z);
+ if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
+ code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
goto _clean;
}
- pColInfo->cols[t].hasVal = true;
+ pColInfo->cols[t].valStat = VAL_STAT_HAS;
pColInfo->boundedColumns[pColInfo->numOfBound] = t;
- pColInfo->numOfBound += 1;
+ ++pColInfo->numOfBound;
findColumnIndex = true;
+ if (isOrdered && (lastColIdx > t)) {
+ isOrdered = false;
+ }
+ lastColIdx = t;
break;
}
+ ++t;
+ ++nScanned;
+ }
+ if (!findColumnIndex) {
+ t = 0;
+ int32_t nRemain = nCols - nScanned;
+ while (t < nRemain) {
+ if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) {
+ if (pColInfo->cols[t].valStat == VAL_STAT_HAS) {
+ code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z);
+ goto _clean;
+ }
+
+ pColInfo->cols[t].valStat = VAL_STAT_HAS;
+ pColInfo->boundedColumns[pColInfo->numOfBound] = t;
+ ++pColInfo->numOfBound;
+ findColumnIndex = true;
+ if (isOrdered && (lastColIdx > t)) {
+ isOrdered = false;
+ }
+ lastColIdx = t;
+ break;
+ }
+ ++t;
+ }
}
if (!findColumnIndex) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column/tag name", sToken.z);
+ code = tscInvalidOperationMsg(pInsertParam->msg, "invalid column/tag name", sToken.z);
goto _clean;
}
}
- memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0 , sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
+ pColInfo->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED;
+
+ if (!isOrdered) {
+ pColInfo->colIdxInfo = tcalloc(pColInfo->numOfBound, sizeof(SBoundIdxInfo));
+ if (pColInfo->colIdxInfo == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _clean;
+ }
+ SBoundIdxInfo *pColIdx = pColInfo->colIdxInfo;
+ for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) {
+ pColIdx[i].schemaColIdx = (uint16_t)pColInfo->boundedColumns[i];
+ pColIdx[i].boundIdx = i;
+ }
+ qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), schemaIdxCompar);
+ for (uint16_t i = 0; i < pColInfo->numOfBound; ++i) {
+ pColIdx[i].finalIdx = i;
+ }
+ qsort(pColIdx, pColInfo->numOfBound, sizeof(SBoundIdxInfo), boundIdxCompar);
+ }
+
+ memset(&pColInfo->boundedColumns[pColInfo->numOfBound], 0,
+ sizeof(int32_t) * (pColInfo->numOfCols - pColInfo->numOfBound));
+
return TSDB_CODE_SUCCESS;
- _clean:
- pCmd->curSql = NULL;
- pCmd->parseFinished = 1;
+_clean:
+ pInsertParam->sql = NULL;
return code;
}
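+// expand a possibly quoted file-path token into a full path; wordexp() resolves
+// "~" and environment variables, and the result is capped at PATH_MAX bytes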
+static int32_t getFileFullPath(SStrToken* pToken, char* output) {
+ char path[PATH_MAX] = {0};
+ strncpy(path, pToken->z, pToken->n);
+ strdequote(path);
+
+ wordexp_t full_path;
+ if (wordexp(path, &full_path, 0) != 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ tstrncpy(output, full_path.we_wordv[0], PATH_MAX);
+ wordfree(&full_path);
+ return TSDB_CODE_SUCCESS;
+}
+
/**
* parse insert sql
* @param pSql
@@ -1101,12 +1341,15 @@ static int32_t parseBoundColumns(SSqlCmd* pCmd, SParsedDataColInfo* pColInfo, SS
*/
int tsParseInsertSql(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
- char* str = pCmd->curSql;
+
+ SInsertStatementParam* pInsertParam = &pCmd->insertParam;
+ pInsertParam->objectId = pSql->self;
+ char* str = pInsertParam->sql;
int32_t totalNum = 0;
int32_t code = TSDB_CODE_SUCCESS;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
assert(pQueryInfo != NULL);
STableMetaInfo *pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo):tscGetMetaInfo(pQueryInfo, 0);
@@ -1116,21 +1359,17 @@ int tsParseInsertSql(SSqlObj *pSql) {
return code;
}
- if ((code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (NULL == pCmd->pTableBlockHashList) {
- pCmd->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- if (NULL == pCmd->pTableBlockHashList) {
+ if (NULL == pInsertParam->pTableBlockHashList) {
+ pInsertParam->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ if (NULL == pInsertParam->pTableBlockHashList) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _clean;
}
} else {
- str = pCmd->curSql;
+ str = pInsertParam->sql;
}
- tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->pTableBlockHashList);
+ tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pInsertParam->pTableBlockHashList);
while (1) {
int32_t index = 0;
@@ -1142,7 +1381,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
* if the data is from the data file, no data has been generated yet. So, there no data to
* merge or submit, save the file path and parse the file in other routines.
*/
- if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
+ if (TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
goto _clean;
}
@@ -1151,24 +1390,24 @@ int tsParseInsertSql(SSqlObj *pSql) {
* Otherwise, create the first submit block and submit to virtual node.
*/
if (totalNum == 0) {
- code = TSDB_CODE_TSC_INVALID_SQL;
+ code = TSDB_CODE_TSC_INVALID_OPERATION;
goto _clean;
} else {
break;
}
}
- pCmd->curSql = sToken.z;
+ pInsertParam->sql = sToken.z;
char buf[TSDB_TABLE_FNAME_LEN];
SStrToken sTblToken;
sTblToken.z = buf;
// Check if the table name available or not
if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z);
+ code = tscInvalidOperationMsg(pInsertParam->msg, "table name invalid", sToken.z);
goto _clean;
}
- if ((code = tscSetTableFullName(pTableMetaInfo, &sTblToken, pSql)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscSetTableFullName(&pTableMetaInfo->name, &sTblToken, pSql)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
@@ -1183,12 +1422,12 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
- pCmd->curSql = NULL;
+ pInsertParam->sql = NULL;
goto _clean;
}
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL);
+ code = tscInvalidOperationMsg(pInsertParam->msg, "insert data into super table is not supported", NULL);
goto _clean;
}
@@ -1197,71 +1436,62 @@ int tsParseInsertSql(SSqlObj *pSql) {
str += index;
if (sToken.n == 0 || (sToken.type != TK_FILE && sToken.type != TK_VALUES)) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
+ code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES or FILE required", sToken.z);
goto _clean;
}
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (sToken.type == TK_FILE) {
- if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
+ if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_FILE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
index = 0;
sToken = tStrGetToken(str, &index, false);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
+ code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean;
}
str += index;
if (sToken.n == 0) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
+ code = tscSQLSyntaxErrMsg(pInsertParam->msg, "file path is required following keyword FILE", sToken.z);
goto _clean;
}
- strncpy(pCmd->payload, sToken.z, sToken.n);
- strdequote(pCmd->payload);
-
- // todo refactor extract method
- wordexp_t full_path;
- if (wordexp(pCmd->payload, &full_path, 0) != 0) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z);
+ code = getFileFullPath(&sToken, pCmd->payload);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscInvalidOperationMsg(pInsertParam->msg, "invalid filename", sToken.z);
goto _clean;
}
-
- tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize);
- wordfree(&full_path);
-
} else {
if (bindedColumns == NULL) {
STableMeta *pTableMeta = pTableMetaInfo->pTableMeta;
-
- if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
+ if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
STableDataBlocks *dataBuf = NULL;
- int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
+ int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
goto _clean;
}
- code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
+ code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
} else { // bindedColumns != NULL
// insert into tablename(col1, col2,..., coln) values(v1, v2,... vn);
- STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
+ STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, 0)->pTableMeta;
- if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
+ if (validateDataSource(pInsertParam, TSDB_QUERY_TYPE_INSERT, sToken.z) != TSDB_CODE_SUCCESS) {
goto _clean;
}
STableDataBlocks *dataBuf = NULL;
- int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
+ int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
&dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
@@ -1269,22 +1499,22 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
SSchema *pSchema = tscGetTableSchema(pTableMeta);
- code = parseBoundColumns(pCmd, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
+ code = parseBoundColumns(pInsertParam, &dataBuf->boundColumnInfo, pSchema, bindedColumns, NULL);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
- if (dataBuf->boundColumnInfo.cols[0].hasVal == false) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", NULL);
+ if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) {
+ code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL);
goto _clean;
}
if (sToken.type != TK_VALUES) {
- code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z);
+ code = tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword VALUES is expected", sToken.z);
goto _clean;
}
- code = doParseInsertStatement(pCmd, &str, dataBuf, &totalNum);
+ code = doParseInsertStatement(pInsertParam, &str, dataBuf, &totalNum);
if (code != TSDB_CODE_SUCCESS) {
goto _clean;
}
@@ -1293,12 +1523,13 @@ int tsParseInsertSql(SSqlObj *pSql) {
}
// we need to keep the data blocks if there are parameters in the sql
- if (pCmd->numOfParams > 0) {
+ if (pInsertParam->numOfParams > 0) {
goto _clean;
}
- if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId
- if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
+ // merge according to vgId
+ if (!TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_STMT_INSERT) && taosHashGetSize(pInsertParam->pTableBlockHashList) > 0) {
+ if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
goto _clean;
}
}
@@ -1307,8 +1538,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
goto _clean;
_clean:
- pCmd->curSql = NULL;
- pCmd->parseFinished = 1;
+ pInsertParam->sql = NULL;
return code;
}
@@ -1323,59 +1553,66 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
SStrToken sToken = tStrGetToken(pSql->sqlstr, &index, false);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
- pCmd->count = 0;
+ pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
+ SInsertStatementParam* pInsertParam = &pCmd->insertParam;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
-
- TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
+ TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT);
sToken = tStrGetToken(pSql->sqlstr, &index, false);
if (sToken.type != TK_INTO) {
- return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
+ return tscSQLSyntaxErrMsg(pInsertParam->msg, "keyword INTO is expected", sToken.z);
}
- pCmd->curSql = sToken.z + sToken.n;
+ pInsertParam->sql = sToken.z + sToken.n;
return TSDB_CODE_SUCCESS;
}
int tsParseSql(SSqlObj *pSql, bool initial) {
int32_t ret = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
-
- if ((!pCmd->parseFinished) && (!initial)) {
- tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql);
+ if (!initial) {
+ tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->insertParam.sql);
}
- ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
+ ret = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) {
return ret;
}
if (tscIsInsertData(pSql->sqlstr)) {
if (initial && ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS)) {
+ strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
return ret;
}
- // make a backup as tsParseInsertSql may modify the string
ret = tsParseInsertSql(pSql);
- if ((pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
- } else {
+ if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
+ SInsertStatementParam* pInsertParam = &pCmd->insertParam;
+ tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, msg:%s, clear meta cache and retry ", pSql->self, pInsertParam->msg, tstrerror(ret));
+
tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
+
if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {
ret = tsParseInsertSql(pSql);
}
}
+
+ if (ret != TSDB_CODE_SUCCESS) {
+ strncpy(pCmd->payload, pCmd->insertParam.msg, TSDB_DEFAULT_PAYLOAD_SIZE);
+ }
} else {
SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr);
- ret = tscToSQLCmd(pSql, &sqlInfo);
- if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0/* && sqlInfo.type == TSDB_SQL_NULL*/) {
- tscDebug("0x%"PRIx64 " parse sql failed, retry again after clear local meta cache", pSql->self);
+ ret = tscValidateSqlInfo(pSql, &sqlInfo);
+ if (ret == TSDB_CODE_TSC_INVALID_OPERATION && pSql->parseRetry < 1 && sqlInfo.type == TSDB_SQL_SELECT) {
+ tscDebug("0x%"PRIx64 " parse query sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
+
tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
- ret = tscToSQLCmd(pSql, &sqlInfo);
+ ret = tscValidateSqlInfo(pSql, &sqlInfo);
}
SqlInfoDestroy(&sqlInfo);
@@ -1390,30 +1627,25 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
return ret;
}
-static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) {
+static int doPackSendDataBlock(SSqlObj* pSql, SInsertStatementParam *pInsertParam, STableMeta* pTableMeta, int32_t numOfRows, STableDataBlocks *pTableDataBlocks) {
int32_t code = TSDB_CODE_SUCCESS;
- SSqlCmd *pCmd = &pSql->cmd;
- pSql->res.numOfRows = 0;
-
- assert(pCmd->numOfClause == 1);
- STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData);
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
- return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
+ return tscInvalidOperationMsg(pInsertParam->msg, "too many rows in sql, total number of rows should be less than 32767", NULL);
}
- if ((code = tscMergeTableDataBlocks(pSql, true)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscMergeTableDataBlocks(pInsertParam, true)) != TSDB_CODE_SUCCESS) {
return code;
}
- STableDataBlocks *pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
+ STableDataBlocks *pDataBlock = taosArrayGetP(pInsertParam->pDataBlocks, 0);
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
return code;
}
- return tscProcessSql(pSql);
+ return TSDB_CODE_SUCCESS;
}
typedef struct SImportFileSupport {
@@ -1458,32 +1690,33 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
// accumulate the total submit records
pParentSql->res.numOfRows += pSql->res.numOfRows;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
- destroyTableNameList(pCmd);
+ SInsertStatementParam *pInsertParam = &pCmd->insertParam;
+ destroyTableNameList(pInsertParam);
- pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- if (pCmd->pTableBlockHashList == NULL) {
+ if (pInsertParam->pTableBlockHashList == NULL) {
+ pInsertParam->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ if (pInsertParam->pTableBlockHashList == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
}
STableDataBlocks *pTableDataBlock = NULL;
- int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
+ int32_t ret = tscGetDataBlockFromList(pInsertParam->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
+ sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta,
+ &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
- tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
+ tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -1503,7 +1736,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
strtolower(line, line);
int32_t len = 0;
- code = tsParseOneRow(&lineptr, pTableDataBlock, pCmd, tinfo.precision, &len, tokenBuf);
+ code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam);
if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code;
break;
@@ -1522,12 +1755,15 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS) {
if (count > 0) {
- code = doPackSendDataBlock(pSql, count, pTableDataBlock);
- if (code == TSDB_CODE_SUCCESS) {
- return;
- } else {
+ pSql->res.numOfRows = 0;
+ code = doPackSendDataBlock(pSql, pInsertParam, pTableMeta, count, pTableDataBlock);
+ if (code != TSDB_CODE_SUCCESS) {
+ pParentSql->res.code = code;
goto _error;
}
+
+ tscBuildAndSendRequest(pSql, NULL);
+ return;
} else {
taos_free_result(pSql);
tfree(pSupporter);
@@ -1536,13 +1772,14 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
- int32_t v = (code != TSDB_CODE_SUCCESS) ? code : (int32_t)pParentSql->res.numOfRows;
+ int32_t v = (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
return;
}
}
_error:
+ pParentSql->res.code = code;
tfree(tokenBuf);
tfree(line);
taos_free_result(pSql);
@@ -1558,11 +1795,12 @@ void tscImportDataFromFile(SSqlObj *pSql) {
return;
}
- assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && strlen(pCmd->payload) != 0);
+ SInsertStatementParam* pInsertParam = &pCmd->insertParam;
+ assert(TSDB_QUERY_HAS_TYPE(pInsertParam->insertType, TSDB_QUERY_TYPE_FILE_INSERT) && strlen(pCmd->payload) != 0);
+ pCmd->active = pCmd->pQueryInfo;
SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));
SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
- pCmd->count = 1;
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
new file mode 100644
index 0000000000000000000000000000000000000000..1bad1c72409baa9921a096e500c71a2e1a172ad4
--- /dev/null
+++ b/src/client/src/tscParseLineProtocol.c
@@ -0,0 +1,2236 @@
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "os.h"
+#include "osString.h"
+#include "ttype.h"
+#include "tmd5.h"
+#include "tstrbuild.h"
+#include "tname.h"
+#include "hash.h"
+#include "tskiplist.h"
+
+#include "tscUtil.h"
+#include "tsclient.h"
+#include "tscLog.h"
+
+#include "taos.h"
+
+typedef struct {
+ char sTableName[TSDB_TABLE_NAME_LEN];
+ SHashObj* tagHash;
+ SHashObj* fieldHash;
+  SArray* tags;   //SArray<SSchema>
+  SArray* fields; //SArray<SSchema>
+ uint8_t precision;
+} SSmlSTableSchema;
+
+typedef struct {
+ char* key;
+ uint8_t type;
+ int16_t length;
+ char* value;
+
+ uint32_t fieldSchemaIdx;
+} TAOS_SML_KV;
+
+typedef struct {
+ char* stableName;
+
+ char* childTableName;
+ TAOS_SML_KV* tags;
+ int32_t tagNum;
+
+ // first kv must be timestamp
+ TAOS_SML_KV* fields;
+ int32_t fieldNum;
+
+ uint32_t schemaIdx;
+} TAOS_SML_DATA_POINT;
+
+typedef enum {
+ SML_TIME_STAMP_NOW,
+ SML_TIME_STAMP_SECONDS,
+ SML_TIME_STAMP_MILLI_SECONDS,
+ SML_TIME_STAMP_MICRO_SECONDS,
+ SML_TIME_STAMP_NANO_SECONDS
+} SMLTimeStampType;
+
+typedef struct {
+ uint64_t id;
+} SSmlLinesInfo;
+
+//=================================================================================================
+
+static uint64_t linesSmlHandleId = 0;
+
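+// produce a non-zero id for each schemaless-insert call, used to tag its log lines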
+uint64_t genLinesSmlId() {
+ uint64_t id;
+
+ do {
+ id = atomic_add_fetch_64(&linesSmlHandleId, 1);
+ } while (id == 0);
+
+ return id;
+}
+
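+// qsort comparator: order tag key-value pairs case-insensitively by key;
+// on a common prefix the shorter key sorts first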
+int compareSmlColKv(const void* p1, const void* p2) {
+ TAOS_SML_KV* kv1 = (TAOS_SML_KV*)p1;
+ TAOS_SML_KV* kv2 = (TAOS_SML_KV*)p2;
+ int kvLen1 = (int)strlen(kv1->key);
+ int kvLen2 = (int)strlen(kv2->key);
+ int res = strncasecmp(kv1->key, kv2->key, MIN(kvLen1, kvLen2));
+ if (res != 0) {
+ return res;
+ } else {
+ return kvLen1-kvLen2;
+ }
+}
+
+typedef enum {
+ SCHEMA_ACTION_CREATE_STABLE,
+ SCHEMA_ACTION_ADD_COLUMN,
+ SCHEMA_ACTION_ADD_TAG,
+ SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
+ SCHEMA_ACTION_CHANGE_TAG_SIZE,
+} ESchemaAction;
+
+typedef struct {
+ char sTableName[TSDB_TABLE_NAME_LEN];
+  SArray* tags;   //SArray<SSchema>
+  SArray* fields; //SArray<SSchema>
+} SCreateSTableActionInfo;
+
+typedef struct {
+ char sTableName[TSDB_TABLE_NAME_LEN];
+ SSchema* field;
+} SAlterSTableActionInfo;
+
+typedef struct {
+ ESchemaAction action;
+ union {
+ SCreateSTableActionInfo createSTable;
+ SAlterSTableActionInfo alterSTable;
+ };
+} SSchemaAction;
+
+static int32_t getFieldBytesFromSmlKv(TAOS_SML_KV* kv, int32_t* bytes, uint64_t id) {
+ if (!IS_VAR_DATA_TYPE(kv->type)) {
+ *bytes = tDataTypes[kv->type].bytes;
+ } else {
+ if (kv->type == TSDB_DATA_TYPE_NCHAR) {
+ char* ucs = malloc(kv->length * TSDB_NCHAR_SIZE + 1);
+ int32_t bytesNeeded = 0;
+ bool succ = taosMbsToUcs4(kv->value, kv->length, ucs, kv->length * TSDB_NCHAR_SIZE, &bytesNeeded);
+ if (!succ) {
+ free(ucs);
+ tscError("SML:0x%"PRIx64" convert nchar string to UCS4_LE failed:%s", id, kv->value);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ free(ucs);
+ *bytes = bytesNeeded + VARSTR_HEADER_SIZE;
+ } else if (kv->type == TSDB_DATA_TYPE_BINARY) {
+ *bytes = kv->length + VARSTR_HEADER_SIZE;
+ }
+ }
+ return 0;
+}
+
+static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* array, SSmlLinesInfo* info) {
+ SSchema* pField = NULL;
+ size_t* pFieldIdx = taosHashGet(hash, smlKv->key, strlen(smlKv->key));
+ size_t fieldIdx = -1;
+ int32_t code = 0;
+ if (pFieldIdx) {
+ fieldIdx = *pFieldIdx;
+ pField = taosArrayGet(array, fieldIdx);
+
+ if (pField->type != smlKv->type) {
+ tscError("SML:0x%"PRIx64" type mismatch. key %s, type %d. type before %d", info->id, smlKv->key, smlKv->type, pField->type);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int32_t bytes = 0;
+ code = getFieldBytesFromSmlKv(smlKv, &bytes, info->id);
+ if (code != 0) {
+ return code;
+ }
+ pField->bytes = MAX(pField->bytes, bytes);
+
+ } else {
+ SSchema field = {0};
+ size_t tagKeyLen = strlen(smlKv->key);
+ strncpy(field.name, smlKv->key, tagKeyLen);
+ field.name[tagKeyLen] = '\0';
+ field.type = smlKv->type;
+
+ int32_t bytes = 0;
+ code = getFieldBytesFromSmlKv(smlKv, &bytes, info->id);
+ if (code != 0) {
+ return code;
+ }
+ field.bytes = bytes;
+
+ pField = taosArrayPush(array, &field);
+ fieldIdx = taosArrayGetSize(array) - 1;
+ taosHashPut(hash, field.name, tagKeyLen, &fieldIdx, sizeof(fieldIdx));
+ }
+
+ smlKv->fieldSchemaIdx = (uint32_t)fieldIdx;
+
+ return 0;
+}
+
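+// derive a deterministic child table name: sort the tags by key, join the
+// lower-cased "stable,tag1=v1,tag2=v2,..." string, and name the table
+// "t_<md5 of that string>" so identical tag sets map to the same table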
+static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
+ SSmlLinesInfo* info) {
+ tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
+ qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
+
+ SStringBuilder sb; memset(&sb, 0, sizeof(sb));
+ char sTableName[TSDB_TABLE_NAME_LEN] = {0};
+ strtolower(sTableName, point->stableName);
+ taosStringBuilderAppendString(&sb, sTableName);
+ for (int j = 0; j < point->tagNum; ++j) {
+ taosStringBuilderAppendChar(&sb, ',');
+ TAOS_SML_KV* tagKv = point->tags + j;
+ char tagName[TSDB_COL_NAME_LEN] = {0};
+ strtolower(tagName, tagKv->key);
+ taosStringBuilderAppendString(&sb, tagName);
+ taosStringBuilderAppendChar(&sb, '=');
+ taosStringBuilderAppend(&sb, tagKv->value, tagKv->length);
+ }
+ size_t len = 0;
+ char* keyJoined = taosStringBuilderGetResult(&sb, &len);
+ MD5_CTX context;
+ MD5Init(&context);
+ MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
+ MD5Final(&context);
+ *tableNameLen = snprintf(tableName, *tableNameLen,
+ "t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
+ context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
+ context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
+ context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
+ taosStringBuilderDestroy(&sb);
+ tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName);
+ return 0;
+}
+
+static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) {
+ int32_t code = 0;
+  SHashObj* sname2schema = taosHashInit(32,
+ taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+
+ for (int i = 0; i < numPoint; ++i) {
+ TAOS_SML_DATA_POINT* point = &points[i];
+ size_t stableNameLen = strlen(point->stableName);
+    size_t* pStableIdx = taosHashGet(sname2schema, point->stableName, stableNameLen);
+ SSmlSTableSchema* pStableSchema = NULL;
+ size_t stableIdx = -1;
+ if (pStableIdx) {
+ pStableSchema= taosArrayGet(stableSchemas, *pStableIdx);
+ stableIdx = *pStableIdx;
+ } else {
+ SSmlSTableSchema schema;
+ strncpy(schema.sTableName, point->stableName, stableNameLen);
+ schema.sTableName[stableNameLen] = '\0';
+ schema.fields = taosArrayInit(64, sizeof(SSchema));
+ schema.tags = taosArrayInit(8, sizeof(SSchema));
+ schema.tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ schema.fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+
+ pStableSchema = taosArrayPush(stableSchemas, &schema);
+ stableIdx = taosArrayGetSize(stableSchemas) - 1;
+      taosHashPut(sname2schema, schema.sTableName, stableNameLen, &stableIdx, sizeof(size_t));
+ }
+
+ for (int j = 0; j < point->tagNum; ++j) {
+ TAOS_SML_KV* tagKv = point->tags + j;
+ if (!point->childTableName) {
+ char childTableName[TSDB_TABLE_NAME_LEN];
+ int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
+ getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
+ point->childTableName = calloc(1, tableNameLen+1);
+ strncpy(point->childTableName, childTableName, tableNameLen);
+ point->childTableName[tableNameLen] = '\0';
+ }
+
+ code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, tagKv->key);
+ return code;
+ }
+ }
+
+ for (int j = 0; j < point->fieldNum; ++j) {
+ TAOS_SML_KV* fieldKv = point->fields + j;
+ code = buildSmlKvSchema(fieldKv, pStableSchema->fieldHash, pStableSchema->fields, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, fieldKv->key);
+ return code;
+ }
+ }
+
+ point->schemaIdx = (uint32_t)stableIdx;
+ }
+
+ size_t numStables = taosArrayGetSize(stableSchemas);
+ for (int32_t i = 0; i < numStables; ++i) {
+ SSmlSTableSchema* schema = taosArrayGet(stableSchemas, i);
+ taosHashCleanup(schema->tagHash);
+ taosHashCleanup(schema->fieldHash);
+ }
+  taosHashCleanup(sname2schema);
+
+ tscDebug("SML:0x%"PRIx64" build point schema succeed. num of super table: %zu", info->id, numStables);
+ for (int32_t i = 0; i < numStables; ++i) {
+ SSmlSTableSchema* schema = taosArrayGet(stableSchemas, i);
+ tscDebug("\ttable name: %s, tags number: %zu, fields number: %zu", schema->sTableName,
+ taosArrayGetSize(schema->tags), taosArrayGetSize(schema->fields));
+ }
+
+ return 0;
+}
+
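+// compare one point column/tag against the db schema: request a size change if
+// the incoming var-length data is wider, or an add if the db lacks the column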
+static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[],
+ SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) {
+ char fieldNameLowerCase[TSDB_COL_NAME_LEN] = {0};
+ strtolower(fieldNameLowerCase, pointColField->name);
+
+ size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, strlen(fieldNameLowerCase));
+ if (pDbIndex) {
+ SSchema* dbAttr = taosArrayGet(dbAttrArray, *pDbIndex);
+ assert(strcasecmp(dbAttr->name, pointColField->name) == 0);
+ if (pointColField->type != dbAttr->type) {
+ tscError("SML:0x%"PRIx64" point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id, pointColField->name,
+ pointColField->type, dbAttr->type);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (IS_VAR_DATA_TYPE(pointColField->type) && (pointColField->bytes > dbAttr->bytes)) {
+ if (isTag) {
+ action->action = SCHEMA_ACTION_CHANGE_TAG_SIZE;
+ } else {
+ action->action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
+ }
+ memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
+ memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN);
+ action->alterSTable.field = pointColField;
+ *actionNeeded = true;
+ }
+ } else {
+ if (isTag) {
+ action->action = SCHEMA_ACTION_ADD_TAG;
+ } else {
+ action->action = SCHEMA_ACTION_ADD_COLUMN;
+ }
+ memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
+ memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN);
+ action->alterSTable.field = pointColField;
+ *actionNeeded = true;
+ }
+ if (*actionNeeded) {
+ tscDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, fieldNameLowerCase,
+ action->action);
+ }
+ return 0;
+}
+
+static int32_t buildColumnDescription(SSchema* field,
+ char* buf, int32_t bufSize, int32_t* outBytes) {
+ uint8_t type = field->type;
+
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t bytes = field->bytes - VARSTR_HEADER_SIZE;
+ if (type == TSDB_DATA_TYPE_NCHAR) {
+ bytes = bytes/TSDB_NCHAR_SIZE;
+ }
+ int out = snprintf(buf, bufSize,"%s %s(%d)",
+ field->name,tDataTypes[field->type].name, bytes);
+ *outBytes = out;
+ } else {
+ int out = snprintf(buf, bufSize, "%s %s",
+ field->name, tDataTypes[type].name);
+ *outBytes = out;
+ }
+
+ return 0;
+}
+
+
+static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ int32_t outBytes = 0;
+ char *result = (char *)calloc(1, tsMaxSQLStringLen+1);
+ int32_t capacity = tsMaxSQLStringLen + 1;
+
+ tscDebug("SML:0x%"PRIx64" apply schema action. action: %d", info->id, action->action);
+ switch (action->action) {
+ case SCHEMA_ACTION_ADD_COLUMN: {
+ int n = sprintf(result, "alter stable %s add column ", action->alterSTable.sTableName);
+ buildColumnDescription(action->alterSTable.field, result+n, capacity-n, &outBytes);
+ TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
+ code = taos_errno(res);
+ char* errStr = taos_errstr(res);
+ char* begin = strstr(errStr, "duplicated column names");
+ bool tscDupColNames = (begin != NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error: %s", info->id, errStr);
+ }
+ taos_free_result(res);
+
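+      // "already exists" (or a duplicated-column error reported by the client)
+      // means the local schema cache is stale: flush it and let mnodes converge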
+ if (code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || code == TSDB_CODE_MND_TAG_ALREAY_EXIST || tscDupColNames) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ taosMsleep(500);
+ }
+ break;
+ }
+ case SCHEMA_ACTION_ADD_TAG: {
+ int n = sprintf(result, "alter stable %s add tag ", action->alterSTable.sTableName);
+ buildColumnDescription(action->alterSTable.field,
+ result+n, capacity-n, &outBytes);
+ TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
+ code = taos_errno(res);
+ char* errStr = taos_errstr(res);
+ char* begin = strstr(errStr, "duplicated column names");
+ bool tscDupColNames = (begin != NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
+ taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_TAG_ALREAY_EXIST || code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || tscDupColNames) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ taosMsleep(500);
+ }
+ break;
+ }
+ case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
+ int n = sprintf(result, "alter stable %s modify column ", action->alterSTable.sTableName);
+ buildColumnDescription(action->alterSTable.field, result+n,
+ capacity-n, &outBytes);
+ TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
+ code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
+ taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_INVALID_COLUMN_LENGTH || code == TSDB_CODE_TSC_INVALID_COLUMN_LENGTH) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ taosMsleep(500);
+ }
+ break;
+ }
+ case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
+ int n = sprintf(result, "alter stable %s modify tag ", action->alterSTable.sTableName);
+ buildColumnDescription(action->alterSTable.field, result+n,
+ capacity-n, &outBytes);
+ TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
+ code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
+ taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_INVALID_TAG_LENGTH || code == TSDB_CODE_TSC_INVALID_TAG_LENGTH) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ taosMsleep(500);
+ }
+ break;
+ }
+ case SCHEMA_ACTION_CREATE_STABLE: {
+ int n = sprintf(result, "create stable %s (", action->createSTable.sTableName);
+ char* pos = result + n; int freeBytes = capacity - n;
+ size_t numCols = taosArrayGetSize(action->createSTable.fields);
+ for (int32_t i = 0; i < numCols; ++i) {
+ SSchema* field = taosArrayGet(action->createSTable.fields, i);
+ buildColumnDescription(field, pos, freeBytes, &outBytes);
+ pos += outBytes; freeBytes -= outBytes;
+ *pos = ','; ++pos; --freeBytes;
+ }
+ --pos; ++freeBytes;
+
+ outBytes = snprintf(pos, freeBytes, ") tags (");
+ pos += outBytes; freeBytes -= outBytes;
+
+ size_t numTags = taosArrayGetSize(action->createSTable.tags);
+ for (int32_t i = 0; i < numTags; ++i) {
+ SSchema* field = taosArrayGet(action->createSTable.tags, i);
+ buildColumnDescription(field, pos, freeBytes, &outBytes);
+ pos += outBytes; freeBytes -= outBytes;
+ *pos = ','; ++pos; --freeBytes;
+ }
+ pos--; ++freeBytes;
+ outBytes = snprintf(pos, freeBytes, ")");
+ TAOS_RES* res = taos_query(taos, result);
+ code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
+ taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ taosMsleep(500);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ free(result);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64 " apply schema action failure. %s", info->id, tstrerror(code));
+ }
+ return code;
+}
+
+static int32_t destroySmlSTableSchema(SSmlSTableSchema* schema) {
+ taosHashCleanup(schema->tagHash);
+ taosHashCleanup(schema->fieldHash);
+ taosArrayDestroy(schema->tags);
+ taosArrayDestroy(schema->fields);
+ return 0;
+}
+
+static int32_t fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
+ schema->tags = taosArrayInit(8, sizeof(SSchema));
+ schema->fields = taosArrayInit(64, sizeof(SSchema));
+ schema->tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ schema->fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+
+ tstrncpy(schema->sTableName, tableName, strlen(tableName)+1);
+ schema->precision = tableMeta->tableInfo.precision;
+  for (int i = 0; i < tableMeta->tableInfo.numOfColumns; ++i) {
+ SSchema field;
+ tstrncpy(field.name, tableMeta->schema[i].name, strlen(tableMeta->schema[i].name)+1);
+ field.type = tableMeta->schema[i].type;
+ field.bytes = tableMeta->schema[i].bytes;
+ taosArrayPush(schema->fields, &field);
+ size_t fieldIndex = taosArrayGetSize(schema->fields) - 1;
+ taosHashPut(schema->fieldHash, field.name, strlen(field.name), &fieldIndex, sizeof(fieldIndex));
+ }
+
+  for (int i = 0; i < tableMeta->tableInfo.numOfTags; ++i) {
+ int j = i + tableMeta->tableInfo.numOfColumns;
+ SSchema field;
+ tstrncpy(field.name, tableMeta->schema[j].name, strlen(tableMeta->schema[j].name)+1);
+ field.type = tableMeta->schema[j].type;
+ field.bytes = tableMeta->schema[j].bytes;
+ taosArrayPush(schema->tags, &field);
+ size_t tagIndex = taosArrayGetSize(schema->tags) - 1;
+ taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex));
+ }
+ tscDebug("SML:0x%"PRIx64 " load table schema succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
+ info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTableMeta, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ int32_t retries = 0;
+ STableMeta* tableMeta = NULL;
+ while (retries++ < TSDB_MAX_REPLICA && tableMeta == NULL) {
+ STscObj* pObj = (STscObj*)taos;
+ if (pObj == NULL || pObj->signature != pObj) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName);
+
+ char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
+ strtolower(tableNameLowerCase, tableName);
+
+ char sql[256];
+ snprintf(sql, 256, "describe %s", tableNameLowerCase);
+ TAOS_RES* res = taos_query(taos, sql);
+ code = taos_errno(res);
+ if (code != 0) {
+ tscError("SML:0x%" PRIx64 " describe table failure. %s", info->id, taos_errstr(res));
+ taos_free_result(res);
+ return code;
+ }
+ taos_free_result(res);
+
+ SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
+ if (pSql == NULL) {
+ tscError("SML:0x%" PRIx64 " failed to allocate memory, reason:%s", info->id, strerror(errno));
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return code;
+ }
+ pSql->pTscObj = taos;
+ pSql->signature = pSql;
+ pSql->fp = NULL;
+
+ registerSqlObj(pSql);
+ SStrToken tableToken = {.z = tableNameLowerCase, .n = (uint32_t)strlen(tableNameLowerCase), .type = TK_ID};
+ tGetToken(tableNameLowerCase, &tableToken.type);
+ // Check if the table name available or not
+ if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
+ code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ sprintf(pSql->cmd.payload, "table name is invalid");
+ taosReleaseRef(tscObjRef, pSql->self);
+ return code;
+ }
+
+ SName sname = {0};
+ if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) {
+ taosReleaseRef(tscObjRef, pSql->self);
+ return code;
+ }
+ char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
+ memset(fullTableName, 0, tListLen(fullTableName));
+ tNameExtractFullName(&sname, fullTableName);
+ taosReleaseRef(tscObjRef, pSql->self);
+
+ size_t size = 0;
+ taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size);
+ }
+
+ if (tableMeta != NULL) {
+ *pTableMeta = tableMeta;
+ return TSDB_CODE_SUCCESS;
+ } else {
+ tscError("SML:0x%" PRIx64 " failed to retrieve table meta. super table name: %s", info->id, tableName);
+ return TSDB_CODE_TSC_NO_META_CACHED;
+ }
+}
+
+static int32_t loadTableSchemaFromDB(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ STableMeta* tableMeta = NULL;
+ code = retrieveTableMeta(taos, tableName, &tableMeta, info);
+ if (code == TSDB_CODE_SUCCESS) {
+ assert(tableMeta != NULL);
+ fillDbSchema(tableMeta, tableName, schema, info);
+ free(tableMeta);
+ tableMeta = NULL;
+ }
+
+ return code;
+}
+
+static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ size_t numStable = taosArrayGetSize(stableSchemas);
+ for (int i = 0; i < numStable; ++i) {
+ SSmlSTableSchema* pointSchema = taosArrayGet(stableSchemas, i);
+ SSmlSTableSchema dbSchema;
+ memset(&dbSchema, 0, sizeof(SSmlSTableSchema));
+
+ code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
+ if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
+ SSchemaAction schemaAction = {0};
+ schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
+ memset(&schemaAction.createSTable, 0, sizeof(SCreateSTableActionInfo));
+ memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN);
+ schemaAction.createSTable.tags = pointSchema->tags;
+ schemaAction.createSTable.fields = pointSchema->fields;
+ applySchemaAction(taos, &schemaAction, info);
+ code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
+ return code;
+ } else {
+ pointSchema->precision = dbSchema.precision;
+ destroySmlSTableSchema(&dbSchema);
+ }
+ } else if (code == TSDB_CODE_SUCCESS) {
+ size_t pointTagSize = taosArrayGetSize(pointSchema->tags);
+ size_t pointFieldSize = taosArrayGetSize(pointSchema->fields);
+
+ SHashObj* dbTagHash = dbSchema.tagHash;
+ SHashObj* dbFieldHash = dbSchema.fieldHash;
+
+ for (int j = 0; j < pointTagSize; ++j) {
+ SSchema* pointTag = taosArrayGet(pointSchema->tags, j);
+ SSchemaAction schemaAction = {0};
+ bool actionNeeded = false;
+ generateSchemaAction(pointTag, dbTagHash, dbSchema.tags, true, pointSchema->sTableName,
+ &schemaAction, &actionNeeded, info);
+ if (actionNeeded) {
+ code = applySchemaAction(taos, &schemaAction, info);
+ if (code != 0) {
+ destroySmlSTableSchema(&dbSchema);
+ return code;
+ }
+ }
+ }
+
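+      // the first field is always the timestamp; adopt the db's column name for
+      // it so the later field-by-field matching lines up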
+ SSchema* pointColTs = taosArrayGet(pointSchema->fields, 0);
+ SSchema* dbColTs = taosArrayGet(dbSchema.fields, 0);
+ memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN);
+
+ for (int j = 1; j < pointFieldSize; ++j) {
+ SSchema* pointCol = taosArrayGet(pointSchema->fields, j);
+ SSchemaAction schemaAction = {0};
+ bool actionNeeded = false;
+ generateSchemaAction(pointCol, dbFieldHash, dbSchema.fields,false, pointSchema->sTableName,
+ &schemaAction, &actionNeeded, info);
+ if (actionNeeded) {
+ code = applySchemaAction(taos, &schemaAction, info);
+ if (code != 0) {
+ destroySmlSTableSchema(&dbSchema);
+ return code;
+ }
+ }
+ }
+
+ pointSchema->precision = dbSchema.precision;
+
+ destroySmlSTableSchema(&dbSchema);
+ } else {
+ tscError("SML:0x%"PRIx64" load table meta error: %s", info->id, tstrerror(code));
+ return code;
+ }
+ }
+ return 0;
+}
+
+static int32_t createChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName,
+ SArray* tagsSchema, SArray* tagsBind, SSmlLinesInfo* info) {
+ size_t numTags = taosArrayGetSize(tagsSchema);
+ char* sql = malloc(tsMaxSQLStringLen+1);
+ if (sql == NULL) {
+ tscError("malloc sql memory error");
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ int freeBytes = tsMaxSQLStringLen + 1;
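+  // build "create table if not exists <ctable> using <stable> (tag1,...) tags (?,...)"
+  // and bind the actual tag values through the prepared-statement API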
+ sprintf(sql, "create table if not exists %s using %s", cTableName, sTableName);
+
+ snprintf(sql+strlen(sql), freeBytes-strlen(sql), "(");
+ for (int i = 0; i < numTags; ++i) {
+ SSchema* tagSchema = taosArrayGet(tagsSchema, i);
+ snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", tagSchema->name);
+ }
+ snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")");
+
+ snprintf(sql + strlen(sql), freeBytes-strlen(sql), " tags (");
+
+ for (int i = 0; i < numTags; ++i) {
+ snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,");
+ }
+ snprintf(sql + strlen(sql) - 1, freeBytes-strlen(sql)+1, ")");
+ sql[strlen(sql)] = '\0';
+
+ tscDebug("SML:0x%"PRIx64" create table : %s", info->id, sql);
+
+ TAOS_STMT* stmt = taos_stmt_init(taos);
+ if (stmt == NULL) {
+ free(sql);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ int32_t code;
+ code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
+ free(sql);
+
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_prepare returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+
+ code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind));
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_bind_param returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+
+ code = taos_stmt_execute(stmt);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_execute returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+
+ code = taos_stmt_close(stmt);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_close return %d:%s", info->id, code, tstrerror(code));
+ return code;
+ }
+ return code;
+}
+
+static int32_t doInsertChildTableWithStmt(TAOS* taos, char* sql, char* cTableName, SArray* batchBind, SSmlLinesInfo* info) {
+ int32_t code = 0;
+
+ TAOS_STMT* stmt = taos_stmt_init(taos);
+ if (stmt == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
+
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_prepare return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+
+ bool tryAgain = false;
+ int32_t try = 0;
+ do {
+ code = taos_stmt_set_tbname(stmt, cTableName);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_set_tbname return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+
+ size_t rows = taosArrayGetSize(batchBind);
+ for (int32_t i = 0; i < rows; ++i) {
+ TAOS_BIND* colsBinds = taosArrayGetP(batchBind, i);
+ code = taos_stmt_bind_param(stmt, colsBinds);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_bind_param return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+ code = taos_stmt_add_batch(stmt);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_add_batch return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
+ return code;
+ }
+ }
+
+ code = taos_stmt_execute(stmt);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_stmt_execute return %d:%s, try:%d", info->id, code, tstrerror(code), try);
+ }
+
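+    // retry transient failures (stale table/vgroup meta, table reconfiguration,
+    // vnode not ready, network errors) up to TSDB_MAX_REPLICA times with
+    // exponential backoff; stale-meta failures also flush the query cache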
+ tryAgain = false;
+ if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID
+ || code == TSDB_CODE_VND_INVALID_VGROUP_ID
+ || code == TSDB_CODE_TDB_TABLE_RECONFIGURE
+ || code == TSDB_CODE_APP_NOT_READY
+ || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && try++ < TSDB_MAX_REPLICA) {
+ tryAgain = true;
+ }
+
+ if (code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ int32_t code2 = taos_errno(res2);
+ if (code2 != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " insert child table. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ if (tryAgain) {
+ taosMsleep(100 * (2 << try));
+ }
+ }
+ if (code == TSDB_CODE_APP_NOT_READY || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ if (tryAgain) {
+ taosMsleep( 100 * (2 << try));
+ }
+ }
+ } while (tryAgain);
+
+
+ taos_stmt_close(stmt);
+ return code;
+}
+
+static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* colsSchema, SArray* rowsBind, size_t rowSize, SSmlLinesInfo* info) {
+ size_t numCols = taosArrayGetSize(colsSchema);
+ char* sql = malloc(tsMaxSQLStringLen+1);
+ if (sql == NULL) {
+ tscError("malloc sql memory error");
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+  int32_t freeBytes = tsMaxSQLStringLen + 1;
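+  // build "insert into ? (col1,...) values (?,...)"; the child table name is
+  // bound per batch with taos_stmt_set_tbname()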
+ sprintf(sql, "insert into ? (");
+
+ for (int i = 0; i < numCols; ++i) {
+ SSchema* colSchema = taosArrayGet(colsSchema, i);
+ snprintf(sql+strlen(sql), freeBytes-strlen(sql), "%s,", colSchema->name);
+ }
+ snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ") values (");
+
+ for (int i = 0; i < numCols; ++i) {
+ snprintf(sql+strlen(sql), freeBytes-strlen(sql), "?,");
+ }
+ snprintf(sql + strlen(sql)-1, freeBytes-strlen(sql)+1, ")");
+ sql[strlen(sql)] = '\0';
+
+ size_t rows = taosArrayGetSize(rowsBind);
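+  // cap one stmt batch at roughly 80% of the rows that fit in a single WAL entry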
+ size_t maxBatchSize = TSDB_MAX_WAL_SIZE/rowSize * 4 / 5;
+ size_t batchSize = MIN(maxBatchSize, rows);
+ tscDebug("SML:0x%"PRIx64" insert rows into child table %s. num of rows: %zu, batch size: %zu",
+ info->id, cTableName, rows, batchSize);
+ SArray* batchBind = taosArrayInit(batchSize, POINTER_BYTES);
+ int32_t code = TSDB_CODE_SUCCESS;
+ for (int i = 0; i < rows;) {
+ int j = i;
+    for (; j < i + batchSize && j < rows; ++j) {
+      TAOS_BIND* colsBinds = taosArrayGetP(rowsBind, j);
+      taosArrayPush(batchBind, &colsBinds);
+    }
+
+    if (j > i) {
+ tscDebug("SML:0x%"PRIx64" insert child table batch from line %d to line %d.", info->id, i, j - 1);
+ code = doInsertChildTableWithStmt(taos, sql, cTableName, batchBind, info);
+ if (code != 0) {
+ taosArrayDestroy(batchBind);
+ tfree(sql);
+ return code;
+ }
+ taosArrayClear(batchBind);
+ }
+ i = j;
+ }
+ taosArrayDestroy(batchBind);
+ tfree(sql);
+ return code;
+}
+
+static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int numPoints,
+ SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) {
+ for (int32_t i = 0; i < numPoints; ++i) {
+ TAOS_SML_DATA_POINT * point = points + i;
+ SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
+
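+    // timestamps were parsed as nanoseconds; convert every timestamp KV to the
+    // destination super table's precision before rows are grouped and bound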
+ for (int j = 0; j < point->tagNum; ++j) {
+ TAOS_SML_KV* kv = point->tags + j;
+ if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) {
+ int64_t ts = *(int64_t*)(kv->value);
+ ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision);
+ *(int64_t*)(kv->value) = ts;
+ }
+ }
+
+ for (int j = 0; j < point->fieldNum; ++j) {
+ TAOS_SML_KV* kv = point->fields + j;
+ if (kv->type == TSDB_DATA_TYPE_TIMESTAMP) {
+ int64_t ts = *(int64_t*)(kv->value);
+ ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, stableSchema->precision);
+ *(int64_t*)(kv->value) = ts;
+ }
+ }
+
+ SArray* cTablePoints = NULL;
+ SArray** pCTablePoints = taosHashGet(cname2points, point->childTableName, strlen(point->childTableName));
+ if (pCTablePoints) {
+ cTablePoints = *pCTablePoints;
+ } else {
+ cTablePoints = taosArrayInit(64, sizeof(point));
+ taosHashPut(cname2points, point->childTableName, strlen(point->childTableName), &cTablePoints, POINTER_BYTES);
+ }
+ taosArrayPush(cTablePoints, &point);
+ }
+
+ return 0;
+}
+
+static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableName,
+ SSmlSTableSchema* sTableSchema, SArray* cTablePoints, SSmlLinesInfo* info) {
+ size_t numTags = taosArrayGetSize(sTableSchema->tags);
+ size_t rows = taosArrayGetSize(cTablePoints);
+
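+  // gather the tag values of this child table; every point in cTablePoints
+  // shares one tag set (the table name is derived from it), so overwriting is safe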
+ TAOS_SML_KV* tagKVs[TSDB_MAX_TAGS] = {0};
+ for (int i= 0; i < rows; ++i) {
+ TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i);
+ for (int j = 0; j < pDataPoint->tagNum; ++j) {
+ TAOS_SML_KV* kv = pDataPoint->tags + j;
+ tagKVs[kv->fieldSchemaIdx] = kv;
+ }
+ }
+
+ SArray* tagBinds = taosArrayInit(numTags, sizeof(TAOS_BIND));
+ taosArraySetSize(tagBinds, numTags);
+ int isNullColBind = TSDB_TRUE;
+ for (int j = 0; j < numTags; ++j) {
+ TAOS_BIND* bind = taosArrayGet(tagBinds, j);
+ bind->is_null = &isNullColBind;
+ }
+ for (int j = 0; j < numTags; ++j) {
+ if (tagKVs[j] == NULL) continue;
+ TAOS_SML_KV* kv = tagKVs[j];
+ TAOS_BIND* bind = taosArrayGet(tagBinds, kv->fieldSchemaIdx);
+ bind->buffer_type = kv->type;
+ bind->length = malloc(sizeof(uintptr_t*));
+ *bind->length = kv->length;
+ bind->buffer = kv->value;
+ bind->is_null = NULL;
+ }
+
+  int32_t code = createChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info);
+
+ for (int i = 0; i < taosArrayGetSize(tagBinds); ++i) {
+ TAOS_BIND* bind = taosArrayGet(tagBinds, i);
+ free(bind->length);
+ }
+ taosArrayDestroy(tagBinds);
+ return code;
+}
+
+static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema, char* cTableName,
+ SArray* cTablePoints, size_t rowSize, SSmlLinesInfo* info) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ size_t numCols = taosArrayGetSize(sTableSchema->fields);
+ size_t rows = taosArrayGetSize(cTablePoints);
+ SArray* rowsBind = taosArrayInit(rows, POINTER_BYTES);
+
+ for (int i = 0; i < rows; ++i) {
+ TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, i);
+
+ TAOS_BIND* colBinds = calloc(numCols, sizeof(TAOS_BIND));
+ if (colBinds == NULL) {
+ tscError("SML:0x%"PRIx64" taos_sml_insert insert points, failed to allocated memory for TAOS_BIND, "
+ "num of rows: %zu, num of cols: %zu", info->id, rows, numCols);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ int isNullColBind = TSDB_TRUE;
+ for (int j = 0; j < numCols; ++j) {
+ TAOS_BIND* bind = colBinds + j;
+ bind->is_null = &isNullColBind;
+ }
+ for (int j = 0; j < point->fieldNum; ++j) {
+ TAOS_SML_KV* kv = point->fields + j;
+ TAOS_BIND* bind = colBinds + kv->fieldSchemaIdx;
+ bind->buffer_type = kv->type;
+ bind->length = malloc(sizeof(uintptr_t*));
+ *bind->length = kv->length;
+ bind->buffer = kv->value;
+ bind->is_null = NULL;
+ }
+ taosArrayPush(rowsBind, &colBinds);
+ }
+
+ code = insertChildTableBatch(taos, cTableName, sTableSchema->fields, rowsBind, rowSize, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" insert into child table %s failed. error %s", info->id, cTableName, tstrerror(code));
+ }
+
+ for (int i = 0; i < rows; ++i) {
+ TAOS_BIND* colBinds = taosArrayGetP(rowsBind, i);
+ for (int j = 0; j < numCols; ++j) {
+ TAOS_BIND* bind = colBinds + j;
+ free(bind->length);
+ }
+ free(colBinds);
+ }
+ taosArrayDestroy(rowsBind);
+ return code;
+}
+
+static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t numPoints, SArray* stableSchemas, SSmlLinesInfo* info) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ SHashObj* cname2points = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ arrangePointsByChildTableName(points, numPoints, cname2points, stableSchemas, info);
+
+ SArray** pCTablePoints = taosHashIterate(cname2points, NULL);
+ while (pCTablePoints) {
+ SArray* cTablePoints = *pCTablePoints;
+
+ TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, 0);
+ SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
+
+ tscDebug("SML:0x%"PRIx64" apply child table tags. child table: %s", info->id, point->childTableName);
+ code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, info);
+ if (code != 0) {
+ tscError("apply child table tags failed. child table %s, error %s", point->childTableName, tstrerror(code));
+ goto cleanup;
+ }
+
+ size_t rowSize = 0;
+ size_t numCols = taosArrayGetSize(sTableSchema->fields);
+ for (int i = 0; i < numCols; ++i) {
+ SSchema* colSchema = taosArrayGet(sTableSchema->fields, i);
+ rowSize += colSchema->bytes;
+ }
+
+ tscDebug("SML:0x%"PRIx64" apply child table points. child table: %s, row size: %zu", info->id, point->childTableName, rowSize);
+ code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints, rowSize, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" Apply child table fields failed. child table %s, error %s", info->id, point->childTableName, tstrerror(code));
+ goto cleanup;
+ }
+
+ tscDebug("SML:0x%"PRIx64" successfully applied data points of child table %s", info->id, point->childTableName);
+
+ pCTablePoints = taosHashIterate(cname2points, pCTablePoints);
+ }
+
+cleanup:
+ pCTablePoints = taosHashIterate(cname2points, NULL);
+ while (pCTablePoints) {
+ SArray* pPoints = *pCTablePoints;
+ taosArrayDestroy(pPoints);
+ pCTablePoints = taosHashIterate(cname2points, pCTablePoints);
+ }
+ taosHashCleanup(cname2points);
+ return code;
+}
+
+int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) {
+ tscDebug("SML:0x%"PRIx64" taos_sml_insert. number of points: %d", info->id, numPoint);
+
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ tscDebug("SML:0x%"PRIx64" build data point schemas", info->id);
+ SArray* stableSchemas = taosArrayInit(32, sizeof(SSmlSTableSchema)); // SArray
+ code = buildDataPointSchemas(points, numPoint, stableSchemas, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" error building data point schemas : %s", info->id, tstrerror(code));
+ goto clean_up;
+ }
+
+ tscDebug("SML:0x%"PRIx64" modify db schemas", info->id);
+ code = modifyDBSchemas(taos, stableSchemas, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" error change db schema : %s", info->id, tstrerror(code));
+ goto clean_up;
+ }
+
+ tscDebug("SML:0x%"PRIx64" apply data points", info->id);
+ code = applyDataPoints(taos, points, numPoint, stableSchemas, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" error apply data points : %s", info->id, tstrerror(code));
+ }
+
+clean_up:
+ for (int i = 0; i < taosArrayGetSize(stableSchemas); ++i) {
+ SSmlSTableSchema* schema = taosArrayGet(stableSchemas, i);
+ taosArrayDestroy(schema->fields);
+ taosArrayDestroy(schema->tags);
+ }
+ taosArrayDestroy(stableSchemas);
+ return code;
+}
+
+int tsc_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) {
+ SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo));
+ info->id = genLinesSmlId();
+ int code = tscSmlInsert(taos, points, numPoint, info);
+ free(info);
+ return code;
+}
+
+//=========================================================================
+
+/* Field escape characters:
+ 1: measurement Comma,Space
+ 2: tag_key, tag_value, field_key Comma,Equal Sign,Space
+ 3: field_value Double quote,Backslash
+*/
+static void escapeSpecialCharacter(uint8_t field, const char **pos) {
+ const char *cur = *pos;
+ if (*cur != '\\') {
+ return;
+ }
+ switch (field) {
+ case 1:
+ switch (*(cur + 1)) {
+ case ',':
+ case ' ':
+ cur++;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (*(cur + 1)) {
+ case ',':
+ case ' ':
+ case '=':
+ cur++;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (*(cur + 1)) {
+ case '"':
+ case '\\':
+ cur++;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ *pos = cur;
+}
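+
+/* Illustrative example (assumed input): in the line
+ *   cpu\ load,host=server\ 01 msg="say \"hi\"" 1626006833639000000
+ * the "\ " in the measurement falls under class 1 above, the "\ " in the tag
+ * value under class 2, and the "\"" inside the field value under class 3. */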
+
+static bool isValidInteger(char *str) {
+ char *c = str;
+ if (*c != '+' && *c != '-' && !isdigit(*c)) {
+ return false;
+ }
+ //reject a bare sign with no digits ("+", "-")
+ if ((*c == '+' || *c == '-') && *(c + 1) == '\0') {
+ return false;
+ }
+ c++;
+ while (*c != '\0') {
+ if (!isdigit(*c)) {
+ return false;
+ }
+ c++;
+ }
+ return true;
+}
+
+static bool isValidFloat(char *str) {
+ char *c = str;
+ uint8_t has_dot, has_exp, has_sign;
+ has_dot = 0;
+ has_exp = 0;
+ has_sign = 0;
+
+ if (*c != '+' && *c != '-' && *c != '.' && !isdigit(*c)) {
+ return false;
+ }
+ //reject a bare sign or dot with no digits ("+", "-", ".")
+ if ((*c == '+' || *c == '-' || *c == '.') && *(c + 1) == '\0') {
+ return false;
+ }
+ if (*c == '.' && isdigit(*(c + 1))) {
+ has_dot = 1;
+ }
+ c++;
+ while (*c != '\0') {
+ if (!isdigit(*c)) {
+ switch (*c) {
+ case '.': {
+ if (!has_dot && !has_exp && isdigit(*(c + 1))) {
+ has_dot = 1;
+ } else {
+ return false;
+ }
+ break;
+ }
+ case 'e':
+ case 'E': {
+ if (!has_exp && isdigit(*(c - 1)) &&
+ (isdigit(*(c + 1)) ||
+ *(c + 1) == '+' ||
+ *(c + 1) == '-')) {
+ has_exp = 1;
+ } else {
+ return false;
+ }
+ break;
+ }
+ case '+':
+ case '-': {
+ if (!has_sign && has_exp && isdigit(*(c + 1))) {
+ has_sign = 1;
+ } else {
+ return false;
+ }
+ break;
+ }
+ default: {
+ return false;
+ }
+ }
+ }
+ c++;
+ } //while
+ return true;
+}
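+
+/* A few examples of what the validators above accept and reject (derived from
+ * the checks themselves, not exhaustive):
+ *   isValidInteger: "42", "+42", "-7"    accepted;  "4a2", "" rejected
+ *   isValidFloat:   "1.5", ".5", "-2e+3" accepted;  "1e", "1.2.3", "e5" rejected */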
+
+static bool isTinyInt(char *pVal, uint16_t len) {
+ if (len <= 2) {
+ return false;
+ }
+ if (!strcmp(&pVal[len - 2], "i8")) {
+ //printf("Type is int8(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isTinyUint(char *pVal, uint16_t len) {
+ if (len <= 2) {
+ return false;
+ }
+ if (pVal[0] == '-') {
+ return false;
+ }
+ if (!strcmp(&pVal[len - 2], "u8")) {
+ //printf("Type is uint8(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isSmallInt(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (!strcmp(&pVal[len - 3], "i16")) {
+ //printf("Type is int16(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isSmallUint(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (pVal[0] == '-') {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "u16") == 0) {
+ //printf("Type is uint16(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isInt(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "i32") == 0) {
+ //printf("Type is int32(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isUint(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (pVal[0] == '-') {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "u32") == 0) {
+ //printf("Type is uint32(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isBigInt(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "i64") == 0) {
+ //printf("Type is int64(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isBigUint(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (pVal[0] == '-') {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "u64") == 0) {
+ //printf("Type is uint64(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isFloat(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "f32") == 0) {
+ //printf("Type is float(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isDouble(char *pVal, uint16_t len) {
+ if (len <= 3) {
+ return false;
+ }
+ if (strcmp(&pVal[len - 3], "f64") == 0) {
+ //printf("Type is double(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
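+
+/* Suffix-to-type mapping implemented by the helpers above (taken from the
+ * checks themselves):
+ *   i8/u8   -> TINYINT/UTINYINT     i16/u16 -> SMALLINT/USMALLINT
+ *   i32/u32 -> INT/UINT             i64/u64 -> BIGINT/UBIGINT
+ *   f32     -> FLOAT                f64     -> DOUBLE
+ * The u* checks additionally reject values with a leading '-'. */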
+
+static bool isBool(char *pVal, uint16_t len, bool *bVal) {
+ if ((len == 1) &&
+ (pVal[len - 1] == 't' ||
+ pVal[len - 1] == 'T')) {
+ //printf("Type is bool(%c)\n", pVal[len - 1]);
+ *bVal = true;
+ return true;
+ }
+
+ if ((len == 1) &&
+ (pVal[len - 1] == 'f' ||
+ pVal[len - 1] == 'F')) {
+ //printf("Type is bool(%c)\n", pVal[len - 1]);
+ *bVal = false;
+ return true;
+ }
+
+ if((len == 4) &&
+ (!strcmp(&pVal[len - 4], "true") ||
+ !strcmp(&pVal[len - 4], "True") ||
+ !strcmp(&pVal[len - 4], "TRUE"))) {
+ //printf("Type is bool(%s)\n", &pVal[len - 4]);
+ *bVal = true;
+ return true;
+ }
+ if((len == 5) &&
+ (!strcmp(&pVal[len - 5], "false") ||
+ !strcmp(&pVal[len - 5], "False") ||
+ !strcmp(&pVal[len - 5], "FALSE"))) {
+ //printf("Type is bool(%s)\n", &pVal[len - 5]);
+ *bVal = false;
+ return true;
+ }
+ return false;
+}
+
+static bool isBinary(char *pVal, uint16_t len) {
+ //binary: "abc"
+ if (len < 2) {
+ return false;
+ }
+ //binary
+ if (pVal[0] == '"' && pVal[len - 1] == '"') {
+ //printf("Type is binary(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isNchar(char *pVal, uint16_t len) {
+ //nchar: L"abc"
+ if (len < 3) {
+ return false;
+ }
+ if (pVal[0] == 'L' && pVal[1] == '"' && pVal[len - 1] == '"') {
+ //printf("Type is nchar(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
+
+static bool isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType) {
+ if (len == 0) {
+ return true;
+ }
+ if ((len == 1) && pVal[0] == '0') {
+ *tsType = SML_TIME_STAMP_NOW;
+ //printf("Type is timestamp(%s)\n", pVal);
+ return true;
+ }
+ if (len < 2) {
+ return false;
+ }
+ //no suffix: default to microseconds
+ if (isdigit(pVal[len - 1]) && isdigit(pVal[len - 2])) {
+ *tsType = SML_TIME_STAMP_MICRO_SECONDS;
+ //printf("Type is timestamp(%s)\n", pVal);
+ return true;
+ }
+ if (pVal[len - 1] == 's') {
+ switch (pVal[len - 2]) {
+ case 'm':
+ *tsType = SML_TIME_STAMP_MILLI_SECONDS;
+ break;
+ case 'u':
+ *tsType = SML_TIME_STAMP_MICRO_SECONDS;
+ break;
+ case 'n':
+ *tsType = SML_TIME_STAMP_NANO_SECONDS;
+ break;
+ default:
+ if (isdigit(pVal[len - 2])) {
+ *tsType = SML_TIME_STAMP_SECONDS;
+ break;
+ } else {
+ return false;
+ }
+ }
+ //printf("Type is timestamp(%s)\n", pVal);
+ return true;
+ }
+ return false;
+}
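+
+/* Assumed examples for the timestamp suffixes above:
+ *   "" (len == 0) and "0"    -> current time (now)
+ *   "1626006833s"            -> seconds
+ *   "1626006833639ms"        -> milliseconds
+ *   "1626006833639000us"     -> microseconds
+ *   "1626006833639000000ns"  -> nanoseconds
+ *   bare digits (no suffix)  -> microseconds by default */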
+
+static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info) {
+ errno = 0;
+ uint8_t type = pVal->type;
+ int16_t length = pVal->length;
+ int64_t val_s;
+ uint64_t val_u;
+ double val_d;
+
+ if (IS_FLOAT_TYPE(type)) {
+ val_d = strtod(str, NULL);
+ } else {
+ if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ val_s = strtoll(str, NULL, 10);
+ } else {
+ val_u = strtoull(str, NULL, 10);
+ }
+ }
+
+ if (errno == ERANGE) {
+ tscError("SML:0x%"PRIx64" Convert number(%s) out of range", info->id, str);
+ return false;
+ }
+
+ switch (type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ if (!IS_VALID_TINYINT(val_s)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(int8_t *)(pVal->value) = (int8_t)val_s;
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ if (!IS_VALID_UTINYINT(val_u)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(uint8_t *)(pVal->value) = (uint8_t)val_u;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ if (!IS_VALID_SMALLINT(val_s)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(int16_t *)(pVal->value) = (int16_t)val_s;
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ if (!IS_VALID_USMALLINT(val_u)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(uint16_t *)(pVal->value) = (uint16_t)val_u;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ if (!IS_VALID_INT(val_s)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(int32_t *)(pVal->value) = (int32_t)val_s;
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ if (!IS_VALID_UINT(val_u)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(uint32_t *)(pVal->value) = (uint32_t)val_u;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ if (!IS_VALID_BIGINT(val_s)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(int64_t *)(pVal->value) = (int64_t)val_s;
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ if (!IS_VALID_UBIGINT(val_u)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(uint64_t *)(pVal->value) = (uint64_t)val_u;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ if (!IS_VALID_FLOAT(val_d)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(float *)(pVal->value) = (float)val_d;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ if (!IS_VALID_DOUBLE(val_d)) {
+ return false;
+ }
+ pVal->value = calloc(length, 1);
+ *(double *)(pVal->value) = (double)val_d;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
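+
+/* Sketch of the range check above (assumed input): for pVal->type ==
+ * TSDB_DATA_TYPE_TINYINT and str == "300", strtoll() yields 300,
+ * IS_VALID_TINYINT(300) fails, and the function returns false; an
+ * out-of-range "300i8" is rejected rather than silently truncated. */
+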
+//len does not include the terminating '\0' of value.
+static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info) {
+ if (len <= 0) {
+ return false;
+ }
+
+ //integer number
+ if (isTinyInt(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_TINYINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 2] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isTinyUint(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_UTINYINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 2] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isSmallInt(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_SMALLINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isSmallUint(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_USMALLINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isInt(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_INT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isUint(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_UINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isBigInt(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_BIGINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isBigUint(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_UBIGINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidInteger(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ //floating number
+ if (isFloat(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_FLOAT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ if (isDouble(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_DOUBLE;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ value[len - 3] = '\0';
+ if (!isValidFloat(value) || !convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ //binary
+ if (isBinary(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_BINARY;
+ pVal->length = len - 2;
+ pVal->value = calloc(pVal->length, 1);
+ //copy after "
+ memcpy(pVal->value, value + 1, pVal->length);
+ return true;
+ }
+ //nchar
+ if (isNchar(value, len)) {
+ pVal->type = TSDB_DATA_TYPE_NCHAR;
+ pVal->length = len - 3;
+ pVal->value = calloc(pVal->length, 1);
+ //copy after L"
+ memcpy(pVal->value, value + 2, pVal->length);
+ return true;
+ }
+ //bool
+ bool bVal;
+ if (isBool(value, len, &bVal)) {
+ pVal->type = TSDB_DATA_TYPE_BOOL;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = calloc(pVal->length, 1);
+ memcpy(pVal->value, &bVal, pVal->length);
+ return true;
+ }
+ //Handle default (no suffix) as float
+ if (isValidInteger(value) || isValidFloat(value)) {
+ pVal->type = TSDB_DATA_TYPE_FLOAT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ if (!convertStrToNumber(pVal, value, info)) {
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
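+
+/* Assumed end-to-end examples for convertSmlValueType():
+ *   "5i32"    -> INT       "9u64"    -> UBIGINT    "2.5f64"   -> DOUBLE
+ *   "\"abc\"" -> BINARY    "L\"ab\"" -> NCHAR      "t"/"true" -> BOOL
+ *   "3.14" (no suffix)     -> FLOAT, the default documented above */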
+
+static int32_t getTimeStampValue(char *value, uint16_t len,
+ SMLTimeStampType type, int64_t *ts) {
+
+ if (len >= 2) {
+ for (int i = 0; i < len - 2; ++i) {
+ if(!isdigit(value[i])) {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ }
+ }
+ //no suffix: default to microseconds; len == 0 means no timestamp was given
+ if (len >= 1 && isdigit(value[len - 1]) && type != SML_TIME_STAMP_NOW) {
+ type = SML_TIME_STAMP_MICRO_SECONDS;
+ }
+ if (len != 0) {
+ *ts = (int64_t)strtoll(value, NULL, 10);
+ } else {
+ type = SML_TIME_STAMP_NOW;
+ }
+ switch (type) {
+ case SML_TIME_STAMP_NOW: {
+ *ts = taosGetTimestampNs();
+ break;
+ }
+ case SML_TIME_STAMP_SECONDS: {
+ *ts = (int64_t)(*ts * 1e9);
+ break;
+ }
+ case SML_TIME_STAMP_MILLI_SECONDS: {
+ *ts = convertTimePrecision(*ts, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_NANO);
+ break;
+ }
+ case SML_TIME_STAMP_MICRO_SECONDS: {
+ *ts = convertTimePrecision(*ts, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
+ break;
+ }
+ case SML_TIME_STAMP_NANO_SECONDS: {
+ *ts = *ts * 1;
+ break;
+ }
+ default: {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
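+
+/* Assumed example: value "1626006833" with type SML_TIME_STAMP_SECONDS gives
+ * *ts = 1626006833 * 1e9 = 1626006833000000000; every branch normalizes to
+ * nanoseconds, matching the precision of taosGetTimestampNs(). */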
+
+static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info) {
+ int32_t ret;
+ SMLTimeStampType type;
+ int64_t tsVal;
+
+ if (!isTimeStamp(value, len, &type)) {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ ret = getTimeStampValue(value, len, type, &tsVal);
+ if (ret) {
+ return ret;
+ }
+ tscDebug("SML:0x%"PRIx64"Timestamp after conversion:%"PRId64, info->id, tsVal);
+
+ pVal->type = TSDB_DATA_TYPE_TIMESTAMP;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = calloc(pVal->length, 1);
+ memcpy(pVal->value, &tsVal, pVal->length);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLinesInfo* info) {
+ const char *start, *cur;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ int len = 0;
+ char key[] = "_ts";
+ char *value = NULL;
+
+ start = cur = *index;
+ *pTS = calloc(1, sizeof(TAOS_SML_KV));
+
+ while(*cur != '\0') {
+ cur++;
+ len++;
+ }
+
+ if (len > 0) {
+ value = calloc(len + 1, 1);
+ memcpy(value, start, len);
+ }
+
+ ret = convertSmlTimeStamp(*pTS, value, len, info);
+ if (ret) {
+ free(value);
+ free(*pTS);
+ return ret;
+ }
+ free(value);
+
+ (*pTS)->key = calloc(sizeof(key), 1);
+ memcpy((*pTS)->key, key, sizeof(key));
+ return ret;
+}
+
+static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
+ char *val = NULL;
+ char *cur = key;
+ char keyLower[TSDB_COL_NAME_LEN];
+ size_t keyLen = 0;
+ while(*cur != '\0') {
+ keyLower[keyLen] = tolower(*cur);
+ keyLen++;
+ cur++;
+ }
+ keyLower[keyLen] = '\0';
+
+ val = taosHashGet(pHash, keyLower, keyLen);
+ if (val) {
+ tscError("SML:0x%"PRIx64" Duplicate key detected:%s", info->id, keyLower);
+ return true;
+ }
+
+ uint8_t dummy_val = 0;
+ taosHashPut(pHash, keyLower, keyLen, &dummy_val, sizeof(uint8_t));
+
+ return false;
+}
+
+static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
+ const char *cur = *index;
+ char key[TSDB_COL_NAME_LEN + 1]; // +1 so writing key[len] = '\0' stays in bounds
+ uint16_t len = 0;
+
+ //key field cannot start with digit
+ if (isdigit(*cur)) {
+ tscError("SML:0x%"PRIx64" Tag key cannnot start with digit", info->id);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ while (*cur != '\0') {
+ if (len > TSDB_COL_NAME_LEN) {
+ tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ //unescaped '=' identifies a tag key
+ if (*cur == '=' && *(cur - 1) != '\\') {
+ break;
+ }
+ //Escape special character
+ if (*cur == '\\') {
+ escapeSpecialCharacter(2, &cur);
+ }
+ key[len] = *cur;
+ cur++;
+ len++;
+ }
+ key[len] = '\0';
+
+ if (checkDuplicateKey(key, pHash, info)) {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ pKV->key = calloc(len + 1, 1);
+ memcpy(pKV->key, key, len + 1);
+ //tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
+ *index = cur + 1;
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
+ bool *is_last_kv, SSmlLinesInfo* info) {
+ const char *start, *cur;
+ char *value = NULL;
+ uint16_t len = 0;
+ start = cur = *index;
+
+ while (1) {
+ // unescaped ',' or ' ' or '\0' identifies a value
+ if ((*cur == ',' || *cur == ' ' || *cur == '\0') && *(cur - 1) != '\\') {
+ //unescaped ' ' or '\0' indicates end of value
+ *is_last_kv = (*cur == ' ' || *cur == '\0') ? true : false;
+ break;
+ }
+ //Escape special character
+ if (*cur == '\\') {
+ escapeSpecialCharacter(2, &cur);
+ }
+ cur++;
+ len++;
+ }
+
+ value = calloc(len + 1, 1);
+ memcpy(value, start, len);
+ value[len] = '\0';
+ if (!convertSmlValueType(pKV, value, len, info)) {
+ tscError("SML:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
+ info->id, value);
+ //free previously allocated key field
+ free(pKV->key);
+ pKV->key = NULL;
+ free(value);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ free(value);
+
+ *index = (*cur == '\0') ? cur : cur + 1;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index,
+ uint8_t *has_tags, SSmlLinesInfo* info) {
+ const char *cur = *index;
+ uint16_t len = 0;
+
+ pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 so the terminating '\0' write stays in bounds
+ if (pSml->stableName == NULL){
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ if (isdigit(*cur)) {
+ tscError("SML:0x%"PRIx64" Measurement field cannnot start with digit", info->id);
+ free(pSml->stableName);
+ pSml->stableName = NULL;
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ while (*cur != '\0') {
+ if (len > TSDB_TABLE_NAME_LEN) {
+ tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
+ free(pSml->stableName);
+ pSml->stableName = NULL;
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ //first unescaped comma or space identifies measurement
+ //if space detected first, meaning no tag in the input
+ if (*cur == ',' && *(cur - 1) != '\\') {
+ *has_tags = 1;
+ break;
+ }
+ if (*cur == ' ' && *(cur - 1) != '\\') {
+ break;
+ }
+ //Comma, Space, Backslash needs to be escaped if any
+ if (*cur == '\\') {
+ escapeSpecialCharacter(1, &cur);
+ }
+ pSml->stableName[len] = *cur;
+ cur++;
+ len++;
+ }
+ pSml->stableName[len] = '\0';
+ *index = cur + 1;
+ tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+//Table name may only contain digits (0-9), letters (a-z), and underscore (_)
+static int32_t isValidChildTableName(const char *pTbName, int16_t len) {
+ const char *cur = pTbName;
+ for (int i = 0; i < len; ++i) {
+ if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
+ const char **index, bool isField,
+ TAOS_SML_DATA_POINT* smlData, SHashObj *pHash,
+ SSmlLinesInfo* info) {
+ const char *cur = *index;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ TAOS_SML_KV *pkv;
+ bool is_last_kv = false;
+
+ int32_t capacity = 0;
+ if (isField) {
+ capacity = 64;
+ *pKVs = calloc(capacity, sizeof(TAOS_SML_KV));
+ // leave the first slot for the timestamp
+ pkv = *pKVs;
+ pkv++;
+ } else {
+ capacity = 8;
+ *pKVs = calloc(capacity, sizeof(TAOS_SML_KV));
+ pkv = *pKVs;
+ }
+
+ while (*cur != '\0') {
+ ret = parseSmlKey(pkv, &cur, pHash, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse key", info->id);
+ goto error;
+ }
+ ret = parseSmlValue(pkv, &cur, &is_last_kv, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse value", info->id);
+ goto error;
+ }
+ if (!isField &&
+ (strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) {
+ ret = isValidChildTableName(pkv->value, pkv->length);
+ if (ret) {
+ goto error;
+ }
+ smlData->childTableName = malloc( pkv->length + 1);
+ memcpy(smlData->childTableName, pkv->value, pkv->length);
+ smlData->childTableName[pkv->length] = '\0';
+ free(pkv->key);
+ free(pkv->value);
+ } else {
+ *num_kvs += 1;
+ }
+ if (is_last_kv) {
+ goto done;
+ }
+
+ //reallocate additional memory for more kvs
+ TAOS_SML_KV *more_kvs = NULL;
+
+ if (isField) {
+ if ((*num_kvs + 2) > capacity) {
+ capacity *= 3; capacity /= 2;
+ more_kvs = realloc(*pKVs, capacity * sizeof(TAOS_SML_KV));
+ } else {
+ more_kvs = *pKVs;
+ }
+ } else {
+ if ((*num_kvs + 1) > capacity) {
+ capacity *= 3; capacity /= 2;
+ more_kvs = realloc(*pKVs, capacity * sizeof(TAOS_SML_KV));
+ } else {
+ more_kvs = *pKVs;
+ }
+ }
+
+ if (!more_kvs) {
+ ret = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto error;
+ }
+ *pKVs = more_kvs;
+ //move pKV points to next TAOS_SML_KV block
+ if (isField) {
+ pkv = *pKVs + *num_kvs + 1;
+ } else {
+ pkv = *pKVs + *num_kvs;
+ }
+ }
+ goto done;
+
+error:
+ return ret;
+done:
+ *index = cur;
+ return ret;
+}
+
+static void moveTimeStampToFirstKv(TAOS_SML_DATA_POINT** smlData, TAOS_SML_KV *ts) {
+ TAOS_SML_KV* tsField = (*smlData)->fields;
+ tsField->length = ts->length;
+ tsField->type = ts->type;
+ tsField->value = malloc(ts->length);
+ tsField->key = malloc(strlen(ts->key) + 1);
+ memcpy(tsField->key, ts->key, strlen(ts->key) + 1);
+ memcpy(tsField->value, ts->value, ts->length);
+ (*smlData)->fieldNum = (*smlData)->fieldNum + 1;
+
+ free(ts->key);
+ free(ts->value);
+ free(ts);
+}
+
+int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
+ const char* index = sql;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ uint8_t has_tags = 0;
+ TAOS_SML_KV *timestamp = NULL;
+ SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+
+ ret = parseSmlMeasurement(smlData, &index, &has_tags, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse measurement", info->id);
+ taosHashCleanup(keyHashTable);
+ return ret;
+ }
+ tscDebug("SML:0x%"PRIx64" Parse measurement finished, has_tags:%d", info->id, has_tags);
+
+ //Parse Tags
+ if (has_tags) {
+ ret = parseSmlKvPairs(&smlData->tags, &smlData->tagNum, &index, false, smlData, keyHashTable, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse tag", info->id);
+ taosHashCleanup(keyHashTable);
+ return ret;
+ }
+ }
+ tscDebug("SML:0x%"PRIx64" Parse tags finished, num of tags:%d", info->id, smlData->tagNum);
+
+ //Parse fields
+ ret = parseSmlKvPairs(&smlData->fields, &smlData->fieldNum, &index, true, smlData, keyHashTable, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse field", info->id);
+ taosHashCleanup(keyHashTable);
+ return ret;
+ }
+ tscDebug("SML:0x%"PRIx64" Parse fields finished, num of fields:%d", info->id, smlData->fieldNum);
+ taosHashCleanup(keyHashTable);
+
+ //Parse timestamp
+ ret = parseSmlTimeStamp(&timestamp, &index, info);
+ if (ret) {
+ tscError("SML:0x%"PRIx64" Unable to parse timestamp", info->id);
+ return ret;
+ }
+ moveTimeStampToFirstKv(&smlData, timestamp);
+ tscDebug("SML:0x%"PRIx64" Parse timestamp finished", info->id);
+
+ return TSDB_CODE_SUCCESS;
+}
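+
+/* Assumed usage sketch (illustrative only):
+ *   char *lines[] = { "st,t1=3i64 c1=5i32,c3=L\"pass\" 1626006833639000000" };
+ *   taos_insert_lines(taos, lines, 1);
+ * parses measurement "st" (has_tags=1), one BIGINT tag, an INT and an NCHAR
+ * field, and a nanosecond timestamp that moveTimeStampToFirstKv() promotes to
+ * fields[0] under the key "_ts". */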
+
+//=========================================================================
+
+void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) {
+ for (int i=0; i<point->tagNum; ++i) {
+ free((point->tags+i)->key);
+ free((point->tags+i)->value);
+ }
+ free(point->tags);
+ for (int i=0; i<point->fieldNum; ++i) {
+ free((point->fields+i)->key);
+ free((point->fields+i)->value);
+ }
+ free(point->fields);
+ free(point->stableName);
+ free(point->childTableName);
+}
+
+int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) {
+ for (int32_t i = 0; i < numLines; ++i) {
+ TAOS_SML_DATA_POINT point = {0};
+ int32_t code = tscParseLine(lines[i], &point, info);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
+ destroySmlDataPoint(&point);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ } else {
+ tscDebug("SML:0x%"PRIx64" data point line parse success. line %d", info->id, i);
+ }
+
+ taosArrayPush(points, &point);
+ }
+ return 0;
+}
+
+int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
+ int32_t code = 0;
+
+ SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo));
+ info->id = genLinesSmlId();
+
+ if (numLines <= 0 || numLines > 65536) {
+ tscError("SML:0x%"PRIx64" taos_insert_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines);
+ free(info);
+ code = TSDB_CODE_TSC_APP_ERROR;
+ return code;
+ }
+
+ for (int i = 0; i < numLines; ++i) {
+ if (lines[i] == NULL) {
+ tscError("SML:0x%"PRIx64" taos_insert_lines line %d is NULL", info->id, i);
+ free(info);
+ code = TSDB_CODE_TSC_APP_ERROR;
+ return code;
+ }
+ }
+
+ SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT));
+ if (lpPoints == NULL) {
+ tscError("SML:0x%"PRIx64" taos_insert_lines failed to allocate memory", info->id);
+ free(info);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ tscDebug("SML:0x%"PRIx64" taos_insert_lines begin inserting %d lines, first line: %s", info->id, numLines, lines[0]);
+ code = tscParseLines(lines, numLines, lpPoints, NULL, info);
+ size_t numPoints = taosArrayGetSize(lpPoints);
+
+ if (code != 0) {
+ goto cleanup;
+ }
+
+ TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
+ code = tscSmlInsert(taos, points, (int)numPoints, info);
+ if (code != 0) {
+ tscError("SML:0x%"PRIx64" taos_sml_insert error: %s", info->id, tstrerror((code)));
+ }
+
+cleanup:
+ tscDebug("SML:0x%"PRIx64" taos_insert_lines finish inserting %d lines. code: %d", info->id, numLines, code);
+ points = TARRAY_GET_START(lpPoints);
+ numPoints = taosArrayGetSize(lpPoints);
+ for (int i=0; i<numPoints; ++i) {
+ destroySmlDataPoint(points+i);
+ }
+ taosArrayDestroy(lpPoints);
+ free(info);
+ return code;
+}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
+#define STMT_RET(c) do { \
+ int32_t _code = c; \
+ if (pStmt && pStmt->pSql) { pStmt->pSql->res.code = _code; } else {terrno = _code;} \
+ return _code; \
+} while (0)
+
+#define STMT_CHECK if (pStmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) { \
+ STMT_RET(TSDB_CODE_TSC_DISCONNECTED); \
+ }
+
+static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
+ return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
+}
+
static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_t len) {
uint16_t size = stmt->numParts + 1;
if (size > stmt->sizeParts) {
@@ -115,6 +159,22 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
var->i64 = *(int64_t*)tb->buffer;
break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ var->u64 = *(uint8_t*)tb->buffer;
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ var->u64 = *(uint16_t*)tb->buffer;
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ var->u64 = *(uint32_t*)tb->buffer;
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ var->u64 = *(uint64_t*)tb->buffer;
+ break;
+
case TSDB_DATA_TYPE_FLOAT:
var->dKey = GET_FLOAT_VAL(tb->buffer);
break;
@@ -135,8 +195,8 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
default:
- tscDebug("param %d: type mismatch or invalid", i);
- return TSDB_CODE_TSC_INVALID_VALUE;
+ tscError("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind type mismatch or invalid");
}
}
@@ -166,6 +226,8 @@ static int normalStmtPrepare(STscStmt* stmt) {
return code;
}
start = i + token.n;
+ } else if (token.type == TK_ILLEGAL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "invalid sql");
}
i += token.n;
@@ -219,9 +281,17 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
taosStringBuilderAppendInteger(&sb, var->i64);
break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ taosStringBuilderAppendUnsignedInteger(&sb, var->u64);
+ break;
+
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
taosStringBuilderAppendDouble(&sb, var->dKey);
@@ -253,14 +323,69 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
return taosStringBuilderGetResult(&sb, NULL);
}
+static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
+ SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
+ int32_t offset = 0;
+ SSchema *schema = (SSchema*)pBlock->pTableMeta->schema;
+
+ for (int32_t i = 0; i < spd->numOfCols; ++i) {
+ if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null
+ for (int32_t n = 0; n < rowNum; ++n) {
+ char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset;
+
+ if (schema[i].type == TSDB_DATA_TYPE_BINARY) {
+ varDataSetLen(ptr, sizeof(int8_t));
+ *(uint8_t*) varDataVal(ptr) = TSDB_DATA_BINARY_NULL;
+ } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) {
+ varDataSetLen(ptr, sizeof(int32_t));
+ *(uint32_t*) varDataVal(ptr) = TSDB_DATA_NCHAR_NULL;
+ } else {
+ setNull(ptr, schema[i].type, schema[i].bytes);
+ }
+ }
+ }
+
+ offset += schema[i].bytes;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+int32_t fillTablesColumnsNull(SSqlObj* pSql) {
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
+
+ STableDataBlocks* pOneTableBlock = *p;
+ while(pOneTableBlock) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+ if (pBlocks->numOfRows > 0 && pOneTableBlock->boundColumnInfo.numOfBound < pOneTableBlock->boundColumnInfo.numOfCols) {
+ fillColumnsNull(pOneTableBlock, pBlocks->numOfRows);
+ }
+
+ p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
+ if (p == NULL) {
+ break;
+ }
+
+ pOneTableBlock = *p;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+
////////////////////////////////////////////////////////////////////////////////
// functions for insertion statement preparation
-static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
- if (bind->is_null != NULL && *(bind->is_null)) {
- setNull(data + param->offset, param->type, param->bytes);
- return TSDB_CODE_SUCCESS;
- }
+static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
+ if (bind->is_null != NULL && *(bind->is_null)) {
+ setNull(data + param->offset, param->type, param->bytes);
+ return TSDB_CODE_SUCCESS;
+ }
+#if 0
if (0) {
// allow user bind param data with different type
union {
@@ -641,8 +766,10 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
}
}
}
+#endif
if (bind->buffer_type != param->type) {
+ tscError("column type mismatch");
return TSDB_CODE_TSC_INVALID_VALUE;
}
@@ -650,26 +777,31 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
switch(param->type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
- size = 1;
+ case TSDB_DATA_TYPE_UTINYINT:
+ *(uint8_t *)(data + param->offset) = *(uint8_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_SMALLINT:
- size = 2;
+ case TSDB_DATA_TYPE_USMALLINT:
+ *(uint16_t *)(data + param->offset) = *(uint16_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_FLOAT:
- size = 4;
+ *(uint32_t *)(data + param->offset) = *(uint32_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
- size = 8;
+ *(uint64_t *)(data + param->offset) = *(uint64_t *)bind->buffer;
break;
case TSDB_DATA_TYPE_BINARY:
if ((*bind->length) > (uintptr_t)param->bytes) {
+ tscError("column length is too big");
return TSDB_CODE_TSC_INVALID_VALUE;
}
size = (short)*bind->length;
@@ -679,6 +811,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
case TSDB_DATA_TYPE_NCHAR: {
int32_t output = 0;
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar failed");
return TSDB_CODE_TSC_INVALID_VALUE;
}
varDataSetLen(data + param->offset, output);
@@ -689,30 +822,159 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
- memcpy(data + param->offset, bind->buffer, size);
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+
return TSDB_CODE_SUCCESS;
}
-static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
- SSqlCmd* pCmd = &stmt->pSql->cmd;
+static int32_t insertStmtGenLastBlock(STableDataBlocks** lastBlock, STableDataBlocks* pBlock) {
+ *lastBlock = (STableDataBlocks*)malloc(sizeof(STableDataBlocks));
+ memcpy(*lastBlock, pBlock, sizeof(STableDataBlocks));
+ (*lastBlock)->cloned = true;
+
+ (*lastBlock)->pData = NULL;
+ (*lastBlock)->ordered = true;
+ (*lastBlock)->prevTS = INT64_MIN;
+ (*lastBlock)->size = sizeof(SSubmitBlk);
+ (*lastBlock)->tsSource = -1;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+ return TSDB_CODE_SUCCESS;
+}
- STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+
+static int32_t insertStmtGenBlock(STscStmt* pStmt, STableDataBlocks** pBlock, STableMeta* pTableMeta, SName* name) {
+ int32_t code = 0;
+
+ if (pStmt->mtb.lastBlock == NULL) {
+ tscError("no previous data block");
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ int32_t msize = tscGetTableMetaSize(pTableMeta);
+ int32_t tsize = sizeof(STableDataBlocks) + msize;
+
+ void *t = malloc(tsize);
+ *pBlock = t;
+
+ memcpy(*pBlock, pStmt->mtb.lastBlock, sizeof(STableDataBlocks));
+
+ t = (char *)t + sizeof(STableDataBlocks);
+ (*pBlock)->pTableMeta = t;
+ memcpy((*pBlock)->pTableMeta, pTableMeta, msize);
+
+ (*pBlock)->pData = malloc((*pBlock)->nAllocSize);
+
+ (*pBlock)->vgId = (*pBlock)->pTableMeta->vgId;
+
+ tNameAssign(&(*pBlock)->tableName, name);
+
+ SSubmitBlk* blk = (SSubmitBlk*)(*pBlock)->pData;
+ memset(blk, 0, sizeof(*blk));
+
+ code = tsSetBlockInfo(blk, pTableMeta, 0);
+ if (code != TSDB_CODE_SUCCESS) {
+ STMT_RET(code);
}
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
+ if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
+ tscError("column mismatch or invalid");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (IS_VAR_DATA_TYPE(param->type) && bind->length == NULL) {
+ tscError("BINARY/NCHAR no length");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ for (int i = 0; i < bind->num; ++i) {
+ char* data = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * (rowNum + i);
+
+ if (bind->is_null != NULL && bind->is_null[i]) {
+ setNull(data + param->offset, param->type, param->bytes);
+ continue;
+ }
+
+ if (!IS_VAR_DATA_TYPE(param->type)) {
+ memcpy(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, tDataTypes[param->type].bytes);
+
+ if (param->offset == 0) {
+ if (tsCheckTimestamp(pBlock, data + param->offset) != TSDB_CODE_SUCCESS) {
+ tscError("invalid timestamp");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ }
+ } else if (param->type == TSDB_DATA_TYPE_BINARY) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("binary length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ int16_t bsize = (short)bind->length[i];
+ STR_WITH_SIZE_TO_VARSTR(data + param->offset, (char *)bind->buffer + bind->buffer_length * i, bsize);
+ } else if (param->type == TSDB_DATA_TYPE_NCHAR) {
+ if (bind->length[i] > (uintptr_t)param->bytes) {
+ tscError("nchar string length too long, ignore it, max:%d, actual:%d", param->bytes, (int32_t)bind->length[i]);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ int32_t output = 0;
+ if (!taosMbsToUcs4((char *)bind->buffer + bind->buffer_length * i, bind->length[i], varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
+ tscError("convert nchar string to UCS4_LE failed:%s", (char*)((char *)bind->buffer + bind->buffer_length * i));
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ varDataSetLen(data + param->offset, output);
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
+ SSqlCmd* pCmd = &stmt->pSql->cmd;
+ STscStmt* pStmt = (STscStmt*)stmt;
+
STableDataBlocks* pBlock = NULL;
- int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
- pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
- if (ret != 0) {
- // todo handle error
+ if (pStmt->multiTbInsert) {
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->insertParam.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
}
- uint32_t totalDataSize = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + 1) * pBlock->rowSize;
if (totalDataSize > pBlock->nAllocSize) {
const double factor = 1.5;
@@ -729,19 +991,147 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
SParamInfo* param = &pBlock->params[j];
- int code = doBindParam(data, param, &bind[param->idx]);
+ int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
if (code != TSDB_CODE_SUCCESS) {
- tscDebug("param %d: type mismatch or invalid", param->idx);
- return code;
+ tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ SSqlCmd* pCmd = &stmt->pSql->cmd;
+ STscStmt* pStmt = (STscStmt*)stmt;
+ int rowNum = bind->num;
+
+ STableDataBlocks* pBlock = NULL;
+
+ if (pStmt->multiTbInsert) {
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ tscError("0x%"PRIx64" Table block hash list is empty", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->insertParam.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pStmt->pSql->self, pStmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
}
+
+ pBlock = *t1;
+ } else {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ int32_t ret =
+ tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ if (!(colIdx == -1 || (colIdx >= 0 && colIdx < pBlock->numOfParams))) {
+ tscError("0x%"PRIx64" invalid colIdx:%d", pStmt->pSql->self, colIdx);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "invalid param colIdx");
+ }
+
+ uint32_t totalDataSize = sizeof(SSubmitBlk) + (pCmd->batchSize + rowNum) * pBlock->rowSize;
+ if (totalDataSize > pBlock->nAllocSize) {
+ const double factor = 1.5;
+
+ void* tmp = realloc(pBlock->pData, (uint32_t)(totalDataSize * factor));
+ if (tmp == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pBlock->pData = (char*)tmp;
+ pBlock->nAllocSize = (uint32_t)(totalDataSize * factor);
+ }
+
+ if (colIdx == -1) {
+ for (uint32_t j = 0; j < pBlock->numOfParams; ++j) {
+ SParamInfo* param = &pBlock->params[j];
+ if (bind[param->idx].num != rowNum) {
+ tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch");
+ }
+
+ int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
+ }
+ }
+
+ pCmd->batchSize += rowNum - 1;
+ } else {
+ SParamInfo* param = &pBlock->params[colIdx];
+
+ int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
+ }
+
+ if (colIdx == (pBlock->numOfParams - 1)) {
+ pCmd->batchSize += rowNum - 1;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+static int insertStmtUpdateBatch(STscStmt* stmt) {
+ SSqlObj* pSql = stmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+ STableDataBlocks* pBlock = NULL;
+
+ if (pCmd->batchSize > INT16_MAX) {
+ tscError("too many record:%d", pCmd->batchSize);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "too many records");
+ }
+
+ if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pCmd->insertParam.pTableBlockHashList, (const char*)&stmt->mtb.currentUid, sizeof(stmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, stmt->mtb.currentUid);
+ return TSDB_CODE_TSC_APP_ERROR;
}
+ pBlock = *t1;
+
+ STableMeta* pTableMeta = pBlock->pTableMeta;
+
+ pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
+ SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;
+ pBlk->numOfRows = pCmd->batchSize;
+ pBlk->dataLen = 0;
+ pBlk->uid = pTableMeta->id.uid;
+ pBlk->tid = pTableMeta->id.tid;
+
return TSDB_CODE_SUCCESS;
}
static int insertStmtAddBatch(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
++pCmd->batchSize;
+
+ if (stmt->multiTbInsert) {
+ return insertStmtUpdateBatch(stmt);
+ }
+
return TSDB_CODE_SUCCESS;
}
@@ -750,9 +1140,9 @@ static int insertStmtReset(STscStmt* pStmt) {
if (pCmd->batchSize > 2) {
int32_t alloced = (pCmd->batchSize + 1) / 2;
- size_t size = taosArrayGetSize(pCmd->pDataBlocks);
+ size_t size = taosArrayGetSize(pCmd->insertParam.pDataBlocks);
for (int32_t i = 0; i < size; ++i) {
- STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, i);
+ STableDataBlocks* pBlock = taosArrayGetP(pCmd->insertParam.pDataBlocks, i);
uint32_t totalDataSize = pBlock->size - sizeof(SSubmitBlk);
pBlock->size = sizeof(SSubmitBlk) + totalDataSize / alloced;
@@ -763,7 +1153,7 @@ static int insertStmtReset(STscStmt* pStmt) {
}
pCmd->batchSize = 0;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
pTableMetaInfo->vgroupIndex = 0;
return TSDB_CODE_SUCCESS;
}
@@ -771,25 +1161,25 @@ static int insertStmtReset(STscStmt* pStmt) {
static int insertStmtExecute(STscStmt* stmt) {
SSqlCmd* pCmd = &stmt->pSql->cmd;
if (pCmd->batchSize == 0) {
- return TSDB_CODE_TSC_INVALID_VALUE;
+ tscError("no records bind");
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "no records bind");
}
- assert(pCmd->numOfClause == 1);
- if (taosHashGetSize(pCmd->pTableBlockHashList) == 0) {
+ if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) == 0) {
return TSDB_CODE_SUCCESS;
}
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
}
STableDataBlocks* pBlock = NULL;
int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
@@ -799,12 +1189,14 @@ static int insertStmtExecute(STscStmt* stmt) {
pBlk->uid = pTableMeta->id.uid;
pBlk->tid = pTableMeta->id.tid;
- int code = tscMergeTableDataBlocks(stmt->pSql, false);
+ fillTablesColumnsNull(stmt->pSql);
+
+ int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- STableDataBlocks* pDataBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
+ STableDataBlocks* pDataBlock = taosArrayGetP(pCmd->insertParam.pDataBlocks, 0);
code = tscCopyDataBlockToPayload(stmt->pSql, pDataBlock);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -815,38 +1207,290 @@ static int insertStmtExecute(STscStmt* stmt) {
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
// wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem);
// data block reset
pCmd->batchSize = 0;
- for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
- if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
- tfree(pCmd->pTableNameList[i]);
+ for(int32_t i = 0; i < pCmd->insertParam.numOfTables; ++i) {
+ if (pCmd->insertParam.pTableNameList && pCmd->insertParam.pTableNameList[i]) {
+ tfree(pCmd->insertParam.pTableNameList[i]);
}
}
- pCmd->numOfTables = 0;
- tfree(pCmd->pTableNameList);
- pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pCmd->insertParam.numOfTables = 0;
+ tfree(pCmd->insertParam.pTableNameList);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
return pSql->res.code;
}
+static void insertBatchClean(STscStmt* pStmt) {
+ SSqlCmd *pCmd = &pStmt->pSql->cmd;
+ SSqlObj *pSql = pStmt->pSql;
+ int32_t size = taosHashGetSize(pCmd->insertParam.pTableBlockHashList);
+
+ // data block reset
+ pCmd->batchSize = 0;
+
+ for(int32_t i = 0; i < size; ++i) {
+ if (pCmd->insertParam.pTableNameList && pCmd->insertParam.pTableNameList[i]) {
+ tfree(pCmd->insertParam.pTableNameList[i]);
+ }
+ }
+
+ tfree(pCmd->insertParam.pTableNameList);
+
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ pCmd->insertParam.numOfTables = 0;
+
+ taosHashClear(pCmd->insertParam.pTableBlockHashList);
+ tscFreeSqlResult(pSql);
+ tscFreeSubobj(pSql);
+ tfree(pSql->pSubs);
+ pSql->subState.numOfSub = 0;
+}
+
+static int insertBatchStmtExecute(STscStmt* pStmt) {
+ int32_t code = 0;
+
+ if(pStmt->mtb.nameSet == false) {
+ tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no table name set");
+ }
+
+ pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
+
+ if (taosHashGetSize(pStmt->pSql->cmd.insertParam.pTableBlockHashList) <= 0) { // merge according to vgId
+ tscError("0x%"PRIx64" no data block to insert", pStmt->pSql->self);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+
+ fillTablesColumnsNull(pStmt->pSql);
+
+ if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ code = tscHandleMultivnodeInsert(pStmt->pSql);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ code = pStmt->pSql->res.code;
+
+ insertBatchClean(pStmt);
+
+ return code;
+}
+
+int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
+ SSqlCmd *pCmd = &pSql->cmd;
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if ((ret = tsInsertInitialCheck(pSql)) != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+
+ int32_t index = 0;
+ SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n == 0) {
+ tscError("table is is expected, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql);
+ }
+
+ if (sToken.n == 1 && sToken.type == TK_QUESTION) {
+ pStmt->multiTbInsert = true;
+ pStmt->mtb.tbname = sToken;
+ pStmt->mtb.nameSet = false;
+ if (pStmt->mtb.pTableHash == NULL) {
+ pStmt->mtb.pTableHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ }
+
+ if (pStmt->mtb.pTableBlockHashList == NULL) {
+ pStmt->mtb.pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ }
+
+ pStmt->mtb.tagSet = true;
+
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n > 0 && (sToken.type == TK_VALUES || sToken.type == TK_LP)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (sToken.n <= 0 || sToken.type != TK_USING) {
+ tscError("keywords USING is expected, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
+ }
+
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
+ tscError("invalid token, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
+ }
+ pStmt->mtb.stbname = sToken;
+
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n <= 0 || sToken.type != TK_TAGS) {
+ tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
+ }
+
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n <= 0 || sToken.type != TK_LP) {
+ tscError("( expected, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
+ }
+
+ pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));
+
+ int32_t loopCont = 1;
+
+ while (loopCont) {
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n <= 0) {
+ tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql);
+ }
+
+ switch (sToken.type) {
+ case TK_RP:
+ loopCont = 0;
+ break;
+ case TK_VALUES:
+ tscError("unexpected token values, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sToken.z);
+ case TK_QUESTION:
+ pStmt->mtb.tagSet = false; //fall through and record the '?' token
+ default:
+ taosArrayPush(pStmt->mtb.tags, &sToken);
+ break;
+ }
+ }
+
+ if (taosArrayGetSize(pStmt->mtb.tags) <= 0) {
+ tscError("no tags, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "no tags", pCmd->insertParam.sql);
+ }
+
+ sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
+ if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
+ tscError("sql error, sql:%s", pCmd->insertParam.sql);
+ return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? sToken.z : pCmd->insertParam.sql);
+ }
+
+ pStmt->mtb.values = sToken;
+
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAOS_BIND* tags) {
+ size_t tagNum = taosArrayGetSize(pStmt->mtb.tags);
+ size_t size = 1048576;
+ char *str = calloc(1, size);
+ size_t len = 0;
+ int32_t ret = 0;
+ int32_t j = 0;
+
+ while (1) {
+ len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
+ if (len >= (size -1)) {
+ size *= 2;
+ free(str);
+ str = calloc(1, size);
+ continue;
+ }
+
+ j = 0;
+
+ for (size_t i = 0; i < tagNum && len < (size - 1); ++i) {
+ SStrToken *t = taosArrayGet(pStmt->mtb.tags, i);
+ if (t->type == TK_QUESTION) {
+ int32_t l = 0;
+ if (i > 0) {
+ str[len++] = ',';
+ }
+
+ if (tags[j].is_null && (*tags[j].is_null)) {
+ ret = converToStr(str + len, TSDB_DATA_TYPE_NULL, NULL, -1, &l);
+ } else {
+ if (tags[j].buffer == NULL) {
+ free(str);
+ tscError("empty tag value in params");
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "empty tag value in params");
+ }
+
+ ret = converToStr(str + len, tags[j].buffer_type, tags[j].buffer, tags[j].length ? (int32_t)*tags[j].length : -1, &l);
+ }
+
+ ++j;
+
+ if (ret) {
+ free(str);
+ return ret;
+ }
+
+ len += l;
+ } else {
+ len += (size_t)snprintf(str + len, size - len - 1, i > 0 ? ",%.*s" : "%.*s", t->n, t->z);
+ }
+ }
+
+ if (len >= (size - 1)) {
+ size *= 2;
+ free(str);
+ str = calloc(1, size);
+ continue;
+ }
+
+ strcat(str, ") ");
+ len += 2;
+
+ if ((len + strlen(pStmt->mtb.values.z)) >= (size - 1)) {
+ size *= 2;
+ free(str);
+ str = calloc(1, size);
+ continue;
+ }
+
+ strcat(str, pStmt->mtb.values.z);
+
+ break;
+ }
+
+ if (pStmt->mtb.sqlstr == NULL) {
+ pStmt->mtb.sqlstr = pSql->sqlstr;
+ } else {
+ tfree(pSql->sqlstr);
+ }
+
+ pSql->sqlstr = str;
+
+ return TSDB_CODE_SUCCESS;
+}
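+
+/* Assumed example of the generated statement: for name "d1001", stable
+ * "meters", tag tokens ("Beijing", ?) and a bound integer tag value 2, the
+ * rendered SQL is roughly
+ *   insert into d1001 using meters tags("Beijing",2) values(?,?)
+ * where the values clause is the original text captured in pStmt->mtb.values. */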
+
////////////////////////////////////////////////////////////////////////////////
// interface functions
TAOS_STMT* taos_stmt_init(TAOS* taos) {
STscObj* pObj = (STscObj*)taos;
+ STscStmt* pStmt = NULL;
+
if (pObj == NULL || pObj->signature != pObj) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
tscError("connection disconnected");
return NULL;
}
- STscStmt* pStmt = calloc(1, sizeof(STscStmt));
+ pStmt = calloc(1, sizeof(STscStmt));
if (pStmt == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscError("failed to allocate memory for statement");
@@ -855,6 +1499,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
pStmt->taos = pObj;
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
+
if (pSql == NULL) {
free(pStmt);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -862,11 +1507,22 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
return NULL;
}
+ if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
+ free(pSql);
+ free(pStmt);
+ terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscError("failed to malloc payload buffer");
+ return NULL;
+ }
+
tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->isBind = true;
pStmt->pSql = pSql;
+ pStmt->last = STMT_INIT;
+ registerSqlObj(pSql);
return pStmt;
}
@@ -874,16 +1530,20 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STscStmt* pStmt = (STscStmt*)stmt;
- if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
- terrno = TSDB_CODE_TSC_DISCONNECTED;
- return TSDB_CODE_TSC_DISCONNECTED;
- }
+ STMT_CHECK
if (sql == NULL) {
tscError("sql is NULL");
- return TSDB_CODE_TSC_APP_ERROR;
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "sql is NULL"));
+ }
+
+ if (pStmt->last != STMT_INIT) {
+ tscError("prepare status error, last:%d", pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "prepare status error"));
}
+ pStmt->last = STMT_PREPARE;
+
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
@@ -893,51 +1553,238 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pSql->fp = waitForQueryRsp;
pSql->fetchFp = waitForQueryRsp;
- pCmd->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
-
- if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
- tscError("%p failed to malloc payload buffer", pSql);
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
+ pCmd->insertParam.objectId = pSql->self;
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+  if (sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
- free(pCmd->payload);
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);
}
pRes->qId = 0;
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
- tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
+ tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
- pSql->cmd.numOfParams = 0;
+ pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
- registerSqlObj(pSql);
+ int32_t ret = stmtParseInsertTbTags(pSql, pStmt);
+ if (ret != TSDB_CODE_SUCCESS) {
+ STMT_RET(ret);
+ }
+
+ if (pStmt->multiTbInsert) {
+ STMT_RET(TSDB_CODE_SUCCESS);
+ }
+
+ memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem);
- return pSql->res.code;
+ STMT_RET(pSql->res.code);
}
- return code;
+ STMT_RET(code);
}
pStmt->isInsert = false;
- return normalStmtPrepare(pStmt);
+ STMT_RET(normalStmtPrepare(pStmt));
+}
+
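A minimal sketch of driving the prepare path above for a multi-table insert; it assumes a valid TAOS* connection `taos` and an existing super table `st1`, both placeholders:

TAOS_STMT *stmt = taos_stmt_init(taos);   // sets terrno and returns NULL on failure
if (stmt == NULL) return -1;

/* '?' for the table name and the tag enables the multi-table path
 * detected by stmtParseInsertTbTags() during prepare. */
const char *sql = "insert into ? using st1 tags(?) values(?, ?)";
if (taos_stmt_prepare(stmt, sql, 0) != 0) {   // length is derived via strlen()
  fprintf(stderr, "prepare: %s\n", taos_stmt_errstr(stmt));
}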
+int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ int32_t code = 0;
+
+ STMT_CHECK
+
+ SSqlObj* pSql = pStmt->pSql;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ if (name == NULL) {
+ tscError("0x%"PRIx64" name is NULL", pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "name is NULL"));
+ }
+
+ if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
+ tscError("0x%"PRIx64" not multiple table insert", pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not multiple table insert"));
+ }
+
+ if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
+ tscError("0x%"PRIx64" set_tbname_tags status error, last:%d", pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "set_tbname_tags status error"));
+ }
+
+ pStmt->last = STMT_SETTBNAME;
+
+ uint64_t* uid = (uint64_t*)taosHashGet(pStmt->mtb.pTableHash, name, strlen(name));
+ if (uid != NULL) {
+ pStmt->mtb.currentUid = *uid;
+
+ STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
+ if (t1 == NULL) {
+ tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid);
+ STMT_RET(TSDB_CODE_TSC_APP_ERROR);
+ }
+
+ SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
+ pCmd->batchSize = pBlk->numOfRows;
+ if (pBlk->numOfRows == 0) {
+ (*t1)->prevTS = INT64_MIN;
+ }
+
+ tsSetBlockInfo(pBlk, (*t1)->pTableMeta, pBlk->numOfRows);
+
+ taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
+
+ tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
+ STMT_RET(TSDB_CODE_SUCCESS);
+ }
+
+ if (pStmt->mtb.subSet && taosHashGetSize(pStmt->mtb.pTableHash) > 0) {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ char sTableName[TSDB_TABLE_FNAME_LEN] = {0};
+ tstrncpy(sTableName, pTableMeta->sTableName, sizeof(sTableName));
+
+ SStrToken tname = {0};
+ tname.type = TK_STRING;
+ tname.z = (char *)name;
+ tname.n = (uint32_t)strlen(name);
+ SName fullname = {0};
+ tscSetTableFullName(&fullname, &tname, pSql);
+
+ memcpy(&pTableMetaInfo->name, &fullname, sizeof(fullname));
+
+ code = tscGetTableMetaEx(pSql, pTableMetaInfo, false, true);
+ if (code != TSDB_CODE_SUCCESS) {
+ STMT_RET(code);
+ }
+
+ pTableMeta = pTableMetaInfo->pTableMeta;
+
+ if (strcmp(sTableName, pTableMeta->sTableName)) {
+ tscError("0x%"PRIx64" only tables belongs to one stable is allowed", pSql->self);
+ STMT_RET(TSDB_CODE_TSC_APP_ERROR);
+ }
+
+ STableDataBlocks* pBlock = NULL;
+
+ insertStmtGenBlock(pStmt, &pBlock, pTableMeta, &pTableMetaInfo->name);
+
+ pCmd->batchSize = 0;
+
+ pStmt->mtb.currentUid = pTableMeta->id.uid;
+ pStmt->mtb.tbNum++;
+
+ taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+ taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+ taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
+
+ tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
+
+ STMT_RET(TSDB_CODE_SUCCESS);
+ }
+
+ if (pStmt->mtb.tagSet) {
+ pStmt->mtb.tbname = tscReplaceStrToken(&pSql->sqlstr, &pStmt->mtb.tbname, name);
+ } else {
+ if (tags == NULL) {
+ tscError("No tags set");
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no tags set"));
+ }
+
+ int32_t ret = stmtGenInsertStatement(pSql, pStmt, name, tags);
+ if (ret != TSDB_CODE_SUCCESS) {
+ STMT_RET(ret);
+ }
+ }
+
+ pStmt->mtb.nameSet = true;
+
+ tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
+
+ pSql->cmd.insertParam.numOfParams = 0;
+ pSql->cmd.batchSize = 0;
+
+ if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
+ SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
+ pCmd->insertParam.pTableBlockHashList = NULL;
+ tscResetSqlCmd(pCmd, false);
+ pCmd->insertParam.pTableBlockHashList = hashList;
+ }
+
+ code = tsParseSql(pStmt->pSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ // wait for the callback function to post the semaphore
+ tsem_wait(&pStmt->pSql->rspSem);
+
+ code = pStmt->pSql->res.code;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ STableDataBlocks* pBlock = NULL;
+ code = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ STMT_RET(code);
+ }
+
+ SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
+ blk->numOfRows = 0;
+
+ pStmt->mtb.currentUid = pTableMeta->id.uid;
+ pStmt->mtb.tbNum++;
+
+ taosHashPut(pStmt->mtb.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)&pBlock, POINTER_BYTES);
+ taosHashPut(pStmt->mtb.pTableHash, name, strlen(name), (char*) &pTableMeta->id.uid, sizeof(pTableMeta->id.uid));
+
+ if (pStmt->mtb.lastBlock == NULL) {
+ insertStmtGenLastBlock(&pStmt->mtb.lastBlock, pBlock);
+ }
+
+ tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
+ }
+
+ STMT_RET(code);
}
+
+int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+ pStmt->mtb.subSet = true;
+ return taos_stmt_set_tbname_tags(stmt, name, NULL);
+}
+
+
+
+int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+ pStmt->mtb.subSet = false;
+ return taos_stmt_set_tbname_tags(stmt, name, NULL);
+}
+
+
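Taken together with prepare and bind, the intended call sequence for these entry points is sketched below. Values are illustrative, and the TAOS_BIND field layout (buffer_type/buffer/buffer_length/length) is assumed from this version's taos.h:

int64_t ts  = 1596000000000;            // illustrative epoch (ms)
int32_t val = 10, tagv = 1;

TAOS_BIND tag = {0}, cols[2] = {{0}};
tag.buffer_type      = TSDB_DATA_TYPE_INT;
tag.buffer           = &tagv;
tag.buffer_length    = sizeof(tagv);
tag.length           = &tag.buffer_length;

cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
cols[0].buffer        = &ts;
cols[0].buffer_length = sizeof(ts);
cols[0].length        = &cols[0].buffer_length;

cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
cols[1].buffer        = &val;
cols[1].buffer_length = sizeof(val);
cols[1].length        = &cols[1].buffer_length;

if (taos_stmt_set_tbname_tags(stmt, "d1", &tag) == 0 &&  // STMT_SETTBNAME
    taos_stmt_bind_param(stmt, cols)            == 0 &&  // STMT_BIND
    taos_stmt_add_batch(stmt)                   == 0) {  // STMT_ADD_BATCH
  taos_stmt_execute(stmt);                               // STMT_EXECUTE
}
taos_stmt_close(stmt);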
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ if (pStmt == NULL || pStmt->taos == NULL) {
+ STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
+ }
if (!pStmt->isInsert) {
SNormalStmt* normal = &pStmt->normal;
if (normal->params != NULL) {
@@ -948,59 +1795,181 @@ int taos_stmt_close(TAOS_STMT* stmt) {
}
free(normal->parts);
free(normal->sql);
+ } else {
+ if (pStmt->multiTbInsert) {
+ taosHashCleanup(pStmt->mtb.pTableHash);
+ bool rmMeta = false;
+ if (pStmt->pSql && pStmt->pSql->res.code != 0) {
+ rmMeta = true;
+ }
+ tscDestroyDataBlock(pStmt->mtb.lastBlock, rmMeta);
+ pStmt->mtb.pTableBlockHashList = tscDestroyBlockHashTable(pStmt->mtb.pTableBlockHashList, rmMeta);
+ if (pStmt->pSql){
+ taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList);
+ pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL;
+ }
+
+ taosArrayDestroy(pStmt->mtb.tags);
+ tfree(pStmt->mtb.sqlstr);
+ }
}
taos_free_result(pStmt->pSql);
- free(pStmt);
- return TSDB_CODE_SUCCESS;
+ tfree(pStmt);
+ STMT_RET(TSDB_CODE_SUCCESS);
}
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+
if (pStmt->isInsert) {
- return insertStmtBindParam(pStmt, bind);
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
+ }
+
+ pStmt->last = STMT_BIND;
+
+ tscDebug("tableId:%" PRIu64 ", try to bind one row", pStmt->mtb.currentUid);
+
+ STMT_RET(insertStmtBindParam(pStmt, bind));
+ } else {
+ STMT_RET(normalStmtBindParam(pStmt, bind));
+ }
+}
+
+
+int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ STMT_CHECK
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not or invalid batch insert"));
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
} else {
- return normalStmtBindParam(pStmt, bind);
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
}
+
+ pStmt->last = STMT_BIND;
+
+ STMT_RET(insertStmtBindParamBatch(pStmt, bind, -1));
}
+int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+
+ if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX || colIdx < 0) {
+ tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
+ }
+
+ if (!pStmt->isInsert) {
+ tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not or invalid batch insert"));
+ }
+
+ if (pStmt->multiTbInsert) {
+ if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
+ } else {
+ if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
+ tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
+ }
+ }
+
+ pStmt->last = STMT_BIND_COL;
+
+ STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
+}
+
+
+
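A column-wise bind sketch for the batch API above; the TAOS_MULTI_BIND layout (buffer_type/buffer/is_null/num) is assumed from this version's taos.h, and the column index is illustrative:

int32_t vals[3]  = {10, 11, 12};
char    nulls[3] = {0, 0, 1};            // third row is NULL

TAOS_MULTI_BIND col = {0};
col.buffer_type   = TSDB_DATA_TYPE_INT;
col.buffer        = vals;
col.buffer_length = sizeof(int32_t);
col.is_null       = nulls;
col.num           = 3;                   // must be in (0, INT16_MAX], see the check above

/* column 0 is the timestamp, so bind the INT column at index 1 */
int code = taos_stmt_bind_single_param_batch(stmt, &col, 1);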
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+
if (pStmt->isInsert) {
- return insertStmtAddBatch(pStmt);
+ if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
+ tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "add batch status error"));
+ }
+
+ pStmt->last = STMT_ADD_BATCH;
+
+ STMT_RET(insertStmtAddBatch(pStmt));
}
- return TSDB_CODE_COM_OPS_NOT_SUPPORT;
+
+ STMT_RET(TSDB_CODE_COM_OPS_NOT_SUPPORT);
}
int taos_stmt_reset(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt->isInsert) {
- return insertStmtReset(pStmt);
+ STMT_RET(insertStmtReset(pStmt));
}
- return TSDB_CODE_SUCCESS;
+ STMT_RET(TSDB_CODE_SUCCESS);
}
int taos_stmt_execute(TAOS_STMT* stmt) {
int ret = 0;
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_CHECK
+
if (pStmt->isInsert) {
- ret = insertStmtExecute(pStmt);
+ if (pStmt->last != STMT_ADD_BATCH) {
+ tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "exec status error"));
+ }
+
+ pStmt->last = STMT_EXECUTE;
+
+ pStmt->pSql->cmd.insertParam.payloadType = PAYLOAD_TYPE_RAW;
+ if (pStmt->multiTbInsert) {
+ ret = insertBatchStmtExecute(pStmt);
+ } else {
+ ret = insertStmtExecute(pStmt);
+ }
} else { // normal stmt query
char* sql = normalStmtBuildSql(pStmt);
if (sql == NULL) {
ret = TSDB_CODE_TSC_OUT_OF_MEMORY;
} else {
- if (pStmt->pSql != NULL) {
- taos_free_result(pStmt->pSql);
- pStmt->pSql = NULL;
- }
+ taosReleaseRef(tscObjRef, pStmt->pSql->self);
pStmt->pSql = taos_query((TAOS*)pStmt->taos, sql);
ret = taos_errno(pStmt->pSql);
free(sql);
}
}
- return ret;
+ STMT_RET(ret);
}
TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) {
@@ -1014,7 +1983,6 @@ TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) {
tscError("result has been used already.");
return NULL;
}
-
TAOS_RES* result = pStmt->pSql;
pStmt->pSql = NULL;
return result;
@@ -1023,76 +1991,80 @@ TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) {
int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert) {
STscStmt* pStmt = (STscStmt*)stmt;
- if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
- terrno = TSDB_CODE_TSC_DISCONNECTED;
- return TSDB_CODE_TSC_DISCONNECTED;
- }
+ STMT_CHECK
if (insert) *insert = pStmt->isInsert;
- return TSDB_CODE_SUCCESS;
+ STMT_RET(TSDB_CODE_SUCCESS);
}
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
STscStmt* pStmt = (STscStmt*)stmt;
- if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
- terrno = TSDB_CODE_TSC_DISCONNECTED;
- return TSDB_CODE_TSC_DISCONNECTED;
- }
+ STMT_CHECK
if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql;
- SSqlCmd *pCmd = &pSql->cmd;
- *nums = pCmd->numOfParams;
- return TSDB_CODE_SUCCESS;
+ SSqlCmd *pCmd = &pSql->cmd;
+ *nums = pCmd->insertParam.numOfParams;
+ STMT_RET(TSDB_CODE_SUCCESS);
} else {
SNormalStmt* normal = &pStmt->normal;
*nums = normal->numParams;
- return TSDB_CODE_SUCCESS;
+ STMT_RET(TSDB_CODE_SUCCESS);
}
}
int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
STscStmt* pStmt = (STscStmt*)stmt;
- if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
- terrno = TSDB_CODE_TSC_DISCONNECTED;
- return TSDB_CODE_TSC_DISCONNECTED;
- }
+ STMT_CHECK
if (pStmt->isInsert) {
SSqlCmd* pCmd = &pStmt->pSql->cmd;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- if (pCmd->pTableBlockHashList == NULL) {
- pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ if (pCmd->insertParam.pTableBlockHashList == NULL) {
+ pCmd->insertParam.pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
}
STableDataBlocks* pBlock = NULL;
int32_t ret =
- tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (ret != 0) {
- // todo handle error
+ STMT_RET(ret);
}
if (idx<0 || idx>=pBlock->numOfParams) {
- tscError("param %d: out of range", idx);
- abort();
+ tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
+ STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "idx out of range"));
}
SParamInfo* param = &pBlock->params[idx];
if (type) *type = param->type;
if (bytes) *bytes = param->bytes;
- return TSDB_CODE_SUCCESS;
+ STMT_RET(TSDB_CODE_SUCCESS);
} else {
- return TSDB_CODE_TSC_APP_ERROR;
+ STMT_RET(TSDB_CODE_COM_OPS_NOT_SUPPORT);
}
}
+
+char *taos_stmt_errstr(TAOS_STMT *stmt) {
+ STscStmt* pStmt = (STscStmt*)stmt;
+
+ if (stmt == NULL) {
+ return (char*) tstrerror(terrno);
+ }
+
+ return taos_errstr(pStmt->pSql);
+}
+
+
+
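With taos_stmt_errstr() in place, every STMT_RET failure can be reported uniformly; a small usage sketch:

if (taos_stmt_execute(stmt) != 0) {
  /* safe even when stmt is NULL: falls back to tstrerror(terrno) */
  fprintf(stderr, "stmt error: %s\n", taos_stmt_errstr(stmt));
}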
const char *taos_data_type(int type) {
switch (type) {
case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 5b2198be622de1b5fbe63d0629aa1bc10e1e72f2..70a3e03d623aa968857c737806d7d6cbfb9c600b 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -16,6 +16,7 @@
#include "os.h"
#include "tscLog.h"
#include "tsclient.h"
+#include "tsocket.h"
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
@@ -228,7 +229,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
-
+
pHeartbeat->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;
@@ -250,8 +251,18 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pQdesc->stime = htobe64(pSql->stime);
pQdesc->queryId = htonl(pSql->queryId);
//pQdesc->useconds = htobe64(pSql->res.useconds);
- pQdesc->useconds = htobe64(now - pSql->stime); // use local time instead of sever rsp elapsed time
- pQdesc->qHandle = htobe64(pSql->res.qId);
+ pQdesc->useconds = htobe64(now - pSql->stime);
+ pQdesc->qId = htobe64(pSql->res.qId);
+ pQdesc->sqlObjId = htobe64(pSql->self);
+ pQdesc->pid = pHeartbeat->pid;
+ if (pSql->cmd.pQueryInfo->stableQuery == true) {
+ pQdesc->numOfSub = pSql->subState.numOfSub;
+ } else {
+ pQdesc->numOfSub = 1;
+ }
+ pQdesc->numOfSub = htonl(pQdesc->numOfSub);
+
+ taosGetFqdn(pQdesc->fqdn);
pHeartbeat->numOfQueries++;
pQdesc++;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 3926564b50f37e8a0b43f685aceba0cc157aa675..64a0b06bef9b53f657dffaa59e257a095f4ab228 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -20,7 +20,12 @@
#define _GNU_SOURCE
#endif // __APPLE__
+#include
#include "os.h"
+#include "qPlan.h"
+#include "qSqlparser.h"
+#include "qTableMeta.h"
+#include "qUtil.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
@@ -28,23 +33,22 @@
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
-#include "tschemautil.h"
#include "tsclient.h"
#include "tstrbuild.h"
#include "ttoken.h"
#include "ttokendef.h"
+#include "qScript.h"
#include "ttype.h"
-#include "qUtil.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
-// -1 is tbname column index, so here use the -3 as the initial value
-#define COLUMN_INDEX_INITIAL_VAL (-3)
+// -1 is the tbname column index, so use -2 as the initial value here
+#define COLUMN_INDEX_INITIAL_VAL (-2)
#define COLUMN_INDEX_INITIALIZER \
{ COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL }
-#define COLUMN_INDEX_VALIDE(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_BLOCK_DIST_COLUMN_INDEX))
+#define COLUMN_INDEX_VALIDE(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_TBNAME_COLUMN_INDEX))
#define TBNAME_LIST_SEP ","
typedef struct SColumnList { // todo refactor
@@ -57,44 +61,47 @@ typedef struct SConvertFunc {
int32_t execFuncId;
} SConvertFunc;
-static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex);
+static SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex, int32_t colId);
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
+static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
+static bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision);
+
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
-static char* cloneCurrentDBName(SSqlObj* pSql);
static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len);
+static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength);
-static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, int32_t nameLength);
-
-static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult);
-static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
- int8_t type, char* fieldName, SSqlExpr* pSqlExpr);
+static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem,
+ bool finalResult, SUdfInfo* pUdfInfo);
+static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes,
+ int8_t type, char* fieldName, SExprInfo* pSqlExpr);
-static uint8_t convertOptr(SStrToken *pToken);
+static uint8_t convertRelationalOperator(SStrToken *pToken);
-static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery);
+static int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool isSTable, bool joinQuery, bool timeWindowQuery);
static bool validateIpAddress(const char* ip, size_t size);
static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool twQuery);
-static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd);
+static int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd);
-static int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode);
+static int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* offsetToken);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSliding);
+static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable);
-static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem);
+static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem, bool outerQuery);
-static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql);
-static int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySQL);
-static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode, SSchema* pSchema);
+static int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql);
+static int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
+static int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema);
static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
@@ -106,39 +113,136 @@ static int32_t validateDNodeConfig(SMiscInfo* pOptions);
static int32_t validateLocalConfig(SMiscInfo* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
+static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
-static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
+static int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
-static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySqlNode* pQuerySqlNode, SSqlObj* pSql);
+static int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql);
-static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg);
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
-static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
-static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode);
+static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg);
+static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate);
-static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex);
+static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex);
static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* pInfo);
static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo);
-static int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index);
-static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid);
+static int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInfo);
+
+static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid);
static bool validateDebugFlag(int32_t v);
+static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
+static int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo);
-static bool isTimeWindowQuery(SQueryInfo* pQueryInfo) {
+static bool isTimeWindowQuery(SQueryInfo* pQueryInfo) {
return pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0;
}
-int16_t getNewResColId(SQueryInfo* pQueryInfo) {
- return pQueryInfo->resColumnId--;
+
+int16_t getNewResColId(SSqlCmd* pCmd) {
+ return pCmd->resColumnId--;
+}
+
+// serialize the exprs in the expr list to binary
+// format: "type | size | value"
+bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision) {
+ bool ret = false;
+ if (!pList || pList->size <= 0 || colType < 0) {
+ return ret;
+ }
+
+ tSqlExpr* item = ((tSqlExprItem*)(taosArrayGet(pList, 0)))->pNode;
+ int32_t firstVarType = item->value.nType;
+
+  SBufferWriter bw = tbufInitWriter(NULL, false);
+ tbufEnsureCapacity(&bw, 512);
+ if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ tbufWriteUint32(&bw, TSDB_DATA_TYPE_BIGINT);
+ } else {
+ tbufWriteUint32(&bw, colType);
+ }
+
+ tbufWriteInt32(&bw, (int32_t)(pList->size));
+
+ for (int32_t i = 0; i < (int32_t)pList->size; i++) {
+ tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
+ tVariant* var = &pSub->value;
+
+    // check that every expr in the list has the same value type as the first
+ if (firstVarType != var->nType) {
+ break;
+ }
+ if ((colType == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(colType))) {
+ if (var->nType != TSDB_DATA_TYPE_BOOL && !IS_SIGNED_NUMERIC_TYPE(var->nType)) {
+ break;
+ }
+ tbufWriteInt64(&bw, var->i64);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ tbufWriteUint64(&bw, var->u64);
+ } else {
+ break;
+ }
+ } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
+ if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) {
+ tbufWriteDouble(&bw, (double)(var->i64));
+ } else if (var->nType == TSDB_DATA_TYPE_DOUBLE || var->nType == TSDB_DATA_TYPE_FLOAT){
+ tbufWriteDouble(&bw, var->dKey);
+ } else {
+ break;
+ }
+ } else if (colType == TSDB_DATA_TYPE_BINARY) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ tbufWriteBinary(&bw, var->pz, var->nLen);
+ } else if (colType == TSDB_DATA_TYPE_NCHAR) {
+ if (var->nType != TSDB_DATA_TYPE_BINARY) {
+ break;
+ }
+ char *buf = (char *)calloc(1, (var->nLen + 1)*TSDB_NCHAR_SIZE);
+ if (tVariantDump(var, buf, colType, false) != TSDB_CODE_SUCCESS) {
+ free(buf);
+ break;
+ }
+ tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE);
+ free(buf);
+ } else if (colType == TSDB_DATA_TYPE_TIMESTAMP) {
+ if (var->nType == TSDB_DATA_TYPE_BINARY) {
+ if (convertTimestampStrToInt64(var, precision) < 0) {
+ break;
+ }
+ tbufWriteInt64(&bw, var->i64);
+ } else if (var->nType == TSDB_DATA_TYPE_BIGINT) {
+ tbufWriteInt64(&bw, var->i64);
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+    if (i == (int32_t)(pList->size - 1)) { ret = true; }
+ }
+ if (ret == true) {
+ if ((*dst = calloc(1, sizeof(tVariant))) != NULL) {
+ tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY);
+ } else {
+ ret = false;
+ }
+ }
+ tbufCloseWriter(&bw);
+ return ret;
}
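For reference, the buffer this writer produces for an IN list such as (1, 2, 3) on a BIGINT column lays out as follows (a sketch derived from the writes above, not verified byte-for-byte):

/*
 *   uint32  type  = TSDB_DATA_TYPE_BIGINT
 *   int32   count = 3
 *   int64         = 1
 *   int64         = 2
 *   int64         = 3
 *
 * On success the buffer is wrapped into a TSDB_DATA_TYPE_BINARY tVariant;
 * TIMESTAMP columns are written with a BIGINT type tag.
 */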
-static uint8_t convertOptr(SStrToken *pToken) {
+
+static uint8_t convertRelationalOperator(SStrToken *pToken) {
switch (pToken->type) {
case TK_LT:
return TSDB_RELATION_LESS;
@@ -158,6 +262,7 @@ static uint8_t convertOptr(SStrToken *pToken) {
return TSDB_RELATION_EQUAL;
case TK_PLUS:
return TSDB_BINARY_OP_ADD;
+
case TK_MINUS:
return TSDB_BINARY_OP_SUBTRACT;
case TK_STAR:
@@ -173,6 +278,8 @@ static uint8_t convertOptr(SStrToken *pToken) {
return TSDB_RELATION_ISNULL;
case TK_NOTNULL:
return TSDB_RELATION_NOTNULL;
+ case TK_IN:
+ return TSDB_RELATION_IN;
default: { return 0; }
}
}
@@ -191,33 +298,37 @@ static bool validateDebugFlag(int32_t v) {
* Used during parsing query sql. Since the query sql usually small in length, error position
* is not needed in the final error message.
*/
-static int32_t invalidSqlErrMsg(char* dstBuffer, const char* errMsg) {
- return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL);
+static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
+ return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
}
-static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
+static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision) {
int64_t time = 0;
- const char* msg = "invalid timestamp";
-
strdequote(pVar->pz);
- char* seg = strnchr(pVar->pz, '-', pVar->nLen, false);
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
+ char* seg = strnchr(pVar->pz, '-', pVar->nLen, false);
if (seg != NULL) {
- if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ if (taosParseTime(pVar->pz, &time, pVar->nLen, precision, tsDaylight) != TSDB_CODE_SUCCESS) {
+ return -1;
}
} else {
if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return -1;
}
}
-
tVariantDestroy(pVar);
tVariantCreateFromBinary(pVar, (char*)&time, 0, TSDB_DATA_TYPE_BIGINT);
+ return 0;
+}
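The split above means the conversion now handles both literal forms, with the precision supplied by the caller from the table meta; for instance:

/*
 *   '2021-07-01 08:00:00'  -> contains '-', parsed via taosParseTime()
 *   1625097600000          -> no '-', dumped via tVariantDump() as BIGINT
 *
 * In both cases pVar is destroyed and recreated as a BIGINT tVariant
 * holding the epoch value; -1 is returned on parse failure.
 */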
+static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
+ const char* msg = "invalid timestamp";
+
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ if (convertTimestampStrToInt64(pVar, tinfo.precision) < 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -227,24 +338,198 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
const char* msg3 = "password needs single quote marks enclosed";
if (pPwd->type != TK_STRING) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strdequote(pPwd->z);
pPwd->n = (uint32_t)strtrim(pPwd->z); // trim space before and after passwords
if (pPwd->n <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pPwd->n >= TSDB_KEY_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+// validate the output field types for a "UNION ALL" subclause
+static int32_t normalizeVarDataTypeLength(SSqlCmd* pCmd) {
+ const char* msg1 = "columns in select clause not identical";
+
+ int32_t diffSize = 0;
+
+  // if there is only one subclause, its limit is the limit of the global result.
+ SQueryInfo* pQueryInfo1 = pCmd->pQueryInfo;
+ SQueryInfo* pSibling = pQueryInfo1->sibling;
+
+ while(pSibling != NULL) {
+ int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo, &diffSize);
+ if (ret != 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ pSibling = pSibling->sibling;
+ }
+
+ if (diffSize) {
+ pQueryInfo1 = pCmd->pQueryInfo;
+ pSibling = pQueryInfo1->sibling;
+
+ while(pSibling->sibling != NULL) {
+ tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo);
+ pSibling = pSibling->sibling;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t readFromFile(char *name, uint32_t *len, void **buf) {
+ struct stat fileStat;
+ if (stat(name, &fileStat) < 0) {
+ tscError("stat file %s failed, error:%s", name, strerror(errno));
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ *len = fileStat.st_size;
+
+ if (*len <= 0) {
+ tscError("file %s is empty", name);
+ return TSDB_CODE_TSC_FILE_EMPTY;
+ }
+
+ *buf = calloc(1, *len);
+ if (*buf == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ int fd = open(name, O_RDONLY);
+ if (fd < 0) {
+ tscError("open file %s failed, error:%s", name, strerror(errno));
+ tfree(*buf);
+ return TAOS_SYSTEM_ERROR(errno);
+ }
+
+ int64_t s = taosRead(fd, *buf, *len);
+ if (s != *len) {
+ tscError("read file %s failed, error:%s", name, strerror(errno));
+ close(fd);
+ tfree(*buf);
+ return TSDB_CODE_TSC_APP_ERROR;
+ }
+ close(fd);
+ return TSDB_CODE_SUCCESS;
+}
+
+
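readFromFile() is consumed just below when loading a UDF body; a minimal calling sketch (the path is a placeholder):

uint32_t len = 0;
void    *buf = NULL;
if (readFromFile("/tmp/udf.so", &len, &buf) == TSDB_CODE_SUCCESS) {
  /* hand (buf, len) to the CREATE FUNCTION message builder ... */
  tfree(buf);   // the caller owns the buffer on success
}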
+int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
+ const char *msg1 = "function name is too long";
+ const char *msg2 = "path is too long";
+ const char *msg3 = "invalid outputtype";
+ const char *msg4 = "invalid script";
+ const char *msg5 = "invalid dyn lib";
+ SSqlCmd *pCmd = &pSql->cmd;
+
+ switch (pInfo->type) {
+ case TSDB_SQL_CREATE_FUNCTION: {
+ SCreateFuncInfo *createInfo = &pInfo->pMiscInfo->funcOpt;
+ uint32_t len = 0;
+ void *buf = NULL;
+
+ if (createInfo->output.type == (uint8_t)-1 || createInfo->output.bytes < 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
+ createInfo->name.z[createInfo->name.n] = 0;
+      // the function name follows the same naming rule as a column name
+ if (validateColumnName(createInfo->name.z) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ strdequote(createInfo->name.z);
+
+ if (strlen(createInfo->name.z) >= TSDB_FUNC_NAME_LEN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ createInfo->path.z[createInfo->path.n] = 0;
+
+ strdequote(createInfo->path.z);
+
+ if (strlen(createInfo->path.z) >= PATH_MAX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ int32_t ret = readFromFile(createInfo->path.z, &len, &buf);
+ if (ret) {
+ return ret;
+ }
+      // validate *.lua or *.so
+ int32_t pathLen = (int32_t)strlen(createInfo->path.z);
+ if ((pathLen > 4) && (0 == strncmp(createInfo->path.z + pathLen - 4, ".lua", 4)) && !isValidScript(buf, len)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ } else if (pathLen > 3 && (0 == strncmp(createInfo->path.z + pathLen - 3, ".so", 3))) {
+ void *handle = taosLoadDll(createInfo->path.z);
+ taosCloseDll(handle);
+ if (handle == NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+ }
+
+ //TODO CHECK CODE
+ if (len + sizeof(SCreateFuncMsg) > pSql->cmd.allocSize) {
+ ret = tscAllocPayload(&pSql->cmd, len + sizeof(SCreateFuncMsg));
+ if (ret) {
+ tfree(buf);
+ return ret;
+ }
+ }
+
+ SCreateFuncMsg *pMsg = (SCreateFuncMsg *)pSql->cmd.payload;
+
+ strcpy(pMsg->name, createInfo->name.z);
+ strcpy(pMsg->path, createInfo->path.z);
+
+ pMsg->funcType = htonl(createInfo->type);
+ pMsg->bufSize = htonl(createInfo->bufSize);
+
+ pMsg->outputType = createInfo->output.type;
+ pMsg->outputLen = htons(createInfo->output.bytes);
+
+ pMsg->codeLen = htonl(len);
+ memcpy(pMsg->code, buf, len);
+ tfree(buf);
+
+ break;
+ }
+ case TSDB_SQL_DROP_FUNCTION: {
+ SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
+
+ SDropFuncMsg *pMsg = (SDropFuncMsg *)pSql->cmd.payload;
+
+ t0->z[t0->n] = 0;
+
+ strdequote(t0->z);
+
+ if (strlen(t0->z) >= TSDB_FUNC_NAME_LEN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ strcpy(pMsg->name, t0->z);
+
+ break;
+ }
+ default:
+ return TSDB_CODE_TSC_APP_ERROR;
}
return TSDB_CODE_SUCCESS;
}
-int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
+int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pInfo == NULL || pSql == NULL) {
return TSDB_CODE_TSC_APP_ERROR;
}
@@ -258,7 +543,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->msg);
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfoS(pCmd);
if (pQueryInfo == NULL) {
pRes->code = terrno;
return pRes->code;
@@ -280,39 +565,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DROP_DB: {
const char* msg2 = "invalid name";
const char* msg3 = "param name too long";
- const char* msg4 = "table is not super table";
SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pInfo->type == TSDB_SQL_DROP_DB) {
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
- code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
+ code = tscSetTableFullName(&pTableMetaInfo->name, pzName, pSql);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
-
- if (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE) {
- code = tscGetTableMeta(pSql, pTableMetaInfo);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
- }
- }
-
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
if (pzName->type == TK_STRING) {
pzName->n = strdequote(pzName->z);
@@ -320,7 +592,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
strncpy(pCmd->payload, pzName->z, pzName->n);
} else { // drop user/account
if (pzName->n >= TSDB_USER_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pCmd->payload, pzName->z, pzName->n);
@@ -334,12 +606,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
break;
@@ -351,7 +623,17 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_SHOW: {
if (setShowInfo(pSql, pInfo) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ break;
+ }
+
+ case TSDB_SQL_CREATE_FUNCTION:
+ case TSDB_SQL_DROP_FUNCTION: {
+ code = handleUserDefinedFunc(pSql, pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
break;
@@ -364,23 +646,23 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char buf[TSDB_DB_NAME_LEN] = {0};
SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));
if (tscValidateName(&token) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (parseCreateDBOptions(pCmd, pCreateDB) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
break;
@@ -390,7 +672,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg = "invalid host name (ip address)";
if (taosArrayGetSize(pInfo->pMiscInfo->a) > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
@@ -410,15 +692,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pPwd = &pInfo->pMiscInfo->user.passwd;
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pName->n >= TSDB_USER_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (tscValidateName(pName) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SCreateAcctInfo* pAcctOpt = &pInfo->pMiscInfo->acctOpt;
@@ -428,7 +710,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) {
} else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) {
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -440,25 +722,26 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// additional msg has been attached already
- code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
+ code = tscSetTableFullName(&pTableMetaInfo->name, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return tscGetTableMeta(pSql, pTableMetaInfo);
}
+ case TSDB_SQL_SHOW_CREATE_STABLE:
case TSDB_SQL_SHOW_CREATE_TABLE: {
const char* msg1 = "invalid table name";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- code = tscSetTableFullName(pTableMetaInfo, pToken, pSql);
+ code = tscSetTableFullName(&pTableMetaInfo->name, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -470,14 +753,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pToken->n > TSDB_DB_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- return tscSetTableFullName(pTableMetaInfo, pToken, pSql);
+ return tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
}
case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
@@ -488,7 +770,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
/* validate the parameter names and options */
if (validateDNodeConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* pMsg = pCmd->payload;
@@ -502,7 +784,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
strncpy(pCfg->ep, t0->z, t0->n);
if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pCfg->config, t1->z, t1->n);
@@ -531,21 +813,21 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pPwd = &pUser->passwd;
if (pName->n >= TSDB_USER_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (tscValidateName(pName) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pCmd->command == TSDB_SQL_CREATE_USER) {
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
if (pUser->type == TSDB_ALTER_USER_PASSWD) {
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else if (pUser->type == TSDB_ALTER_USER_PRIVILEGES) {
assert(pPwd->type == TSDB_DATA_TYPE_NULL);
@@ -559,10 +841,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (strncasecmp(pPrivilege->z, "write", 5) == 0 && pPrivilege->n == 5) {
pCmd->count = 3;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
@@ -575,7 +857,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// validate the parameter names and options
if (validateLocalConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a);
@@ -615,53 +897,60 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SELECT: {
- const char* msg1 = "columns in select clause not identical";
+      const char* msg1 = "no nested query supported in union clause";
+ code = loadAllTableMeta(pSql, pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ pQueryInfo = tscGetQueryInfo(pCmd);
+
+ size_t size = taosArrayGetSize(pInfo->list);
+ for (int32_t i = 0; i < size; ++i) {
+ SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i);
+
+ tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size);
- for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) {
- SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i);
- if (pqi == NULL) {
- pRes->code = terrno;
- return pRes->code;
+ if (size > 1 && pSqlNode->from && pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- }
- assert(pCmd->numOfClause == pInfo->subclauseInfo.numOfClause);
- for (int32_t i = pCmd->clauseIndex; i < pInfo->subclauseInfo.numOfClause; ++i) {
- SQuerySqlNode* pQuerySqlNode = pInfo->subclauseInfo.pClause[i];
- tscTrace("%p start to parse %dth subclause, total:%d", pSql, i, pInfo->subclauseInfo.numOfClause);
- if ((code = doValidateSqlNode(pSql, pQuerySqlNode, i)) != TSDB_CODE_SUCCESS) {
+// normalizeSqlNode(pSqlNode); // normalize the column name in each function
+ if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
return code;
}
- tscPrintSelectClause(pSql, i);
- pCmd->clauseIndex += 1;
- }
+ tscPrintSelNodeList(pSql, i);
- // restore the clause index
- pCmd->clauseIndex = 0;
- // set the command/global limit parameters from the first subclause to the sqlcmd object
- SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0);
- pCmd->command = pQueryInfo1->command;
- int32_t diffSize = 0;
-
- // if there is only one element, the limit of clause is the limit of global result.
- for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
- SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i);
+ if ((i + 1) < size && pQueryInfo->sibling == NULL) {
+ if ((code = tscAddQueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo, &diffSize);
- if (ret != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ SArray *pUdfInfo = NULL;
+ if (pQueryInfo->pUdfInfo) {
+ pUdfInfo = taosArrayDup(pQueryInfo->pUdfInfo);
+ }
+
+ pQueryInfo = pCmd->active;
+ pQueryInfo->pUdfInfo = pUdfInfo;
+ pQueryInfo->udfCopy = true;
}
}
- if (diffSize) {
- for (int32_t i = 1; i < pCmd->numOfClause; ++i) {
- SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pCmd, i);
- tscFieldInfoSetSize(&pQueryInfo1->fieldsInfo, &pQueryInfo2->fieldsInfo);
- }
+ if ((code = normalizeVarDataTypeLength(pCmd)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // set the command/global limit parameters from the first subclause to the sqlcmd object
+ pCmd->active = pCmd->pQueryInfo;
+ pCmd->command = pCmd->pQueryInfo->command;
+
+ STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pCmd->active, 0);
+ if (pTableMetaInfo1->pTableMeta != NULL) {
+ pSql->res.precision = tscGetTableInfo(pTableMetaInfo1->pTableMeta).precision;
}
- pCmd->parseFinished = 1;
return TSDB_CODE_SUCCESS; // do not build query message here
}
@@ -689,20 +978,25 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+ break;
+ }
+ case TSDB_SQL_COMPACT_VNODE:{
+ const char* msg = "invalid compact";
+ if (setCompactVnodeInfo(pSql, pInfo) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
break;
}
-
default:
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
- pSql->cmd.parseFinished = 1;
if (tscBuildMsg[pCmd->command] != NULL) {
return tscBuildMsg[pCmd->command](pSql, pInfo);
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support sql expression");
}
}
@@ -711,10 +1005,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
* are available.
*/
static bool isTopBottomQuery(SQueryInfo* pQueryInfo) {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
+ int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
return true;
@@ -725,8 +1019,8 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) {
}
// need to add timestamp column in result set, if it is a time window query
-static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo) {
- uint64_t uid = tscSqlExprGet(pQueryInfo, 0)->uid;
+static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
+ uint64_t uid = tscExprGet(pQueryInfo, 0)->base.uid;
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
@@ -738,14 +1032,14 @@ static int32_t addPrimaryTsColumnForTimeWindowQuery(SQueryInfo* pQueryInfo) {
}
if (tableIndex == COLUMN_INDEX_INITIAL_VAL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
SSchema s = {.bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name));
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
+ tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL, getNewResColId(pCmd));
return TSDB_CODE_SUCCESS;
}
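+// Example: "SELECT COUNT(*) FROM t INTERVAL(1m)" does not list the timestamp
+// column explicitly; the function above injects a TSDB_FUNC_TS projection at
+// position 0 so each row of a time window result carries its window timestamp.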
@@ -761,7 +1055,7 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
// order by normal column is not supported
int32_t colId = pQueryInfo->order.orderColId;
if (isTimeWindowQuery(pQueryInfo) && colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -771,11 +1065,11 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
* invalid sql:
* select count(tbname)/count(tag1)/count(tag2) from super_table_name [interval(1d)|session(ts, 1d)];
*/
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -784,24 +1078,26 @@ static int32_t checkInvalidExprForTimeWindow(SSqlCmd* pCmd, SQueryInfo* pQueryIn
* select tbname, tags_fields from super_table_name [interval(1s)|session(ts,1s)]
*/
if (tscQueryTags(pQueryInfo) && isTimeWindowQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- return addPrimaryTsColumnForTimeWindowQuery(pQueryInfo);
+ return addPrimaryTsColumnForTimeWindowQuery(pQueryInfo, pCmd);
}
-int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode) {
- const char* msg2 = "interval cannot be less than 10 ms";
- const char* msg3 = "sliding cannot be used without interval";
+int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
+ const char* msg1 = "sliding cannot be used without interval";
+ const char* msg2 = "interval cannot be less than 1 us";
+ const char* msg3 = "interval value is too small";
+ const char* msg4 = "only point interpolation query requires keyword EVERY";
SSqlCmd* pCmd = &pSql->cmd;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
- if (!TPARSER_HAS_TOKEN(pQuerySqlNode->interval.interval)) {
- if (TPARSER_HAS_TOKEN(pQuerySqlNode->sliding)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+
+ if (!TPARSER_HAS_TOKEN(pSqlNode->interval.interval)) {
+ if (TPARSER_HAS_TOKEN(pSqlNode->sliding)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return TSDB_CODE_SUCCESS;
@@ -813,72 +1109,137 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySqlNode
}
// interval is not null
- SStrToken *t = &pQuerySqlNode->interval.interval;
- if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ SStrToken *t = &pSqlNode->interval.interval;
+ if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval,
+ &pQueryInfo->interval.intervalUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
- // if the unit of time window value is millisecond, change the value from microsecond
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
- }
+ if (pQueryInfo->interval.interval <= 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
    // the interval must not be shorter than the minimum tsMinIntervalTime (compared in microseconds)
- if (pQueryInfo->interval.interval < tsMinIntervalTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
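+  // Example: on a millisecond-precision table, INTERVAL(5a) parses to
+  // interval = 5 in the table's precision; convertTimePrecision() normalizes
+  // it to 5000 us before the comparison against tsMinIntervalTime, so the
+  // check behaves the same for ms-, us- and ns-precision tables.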
- if (parseIntervalOffset(pCmd, pQueryInfo, &pQuerySqlNode->interval.offset) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (parseIntervalOffset(pCmd, pQueryInfo, &pSqlNode->interval.offset) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (parseSlidingClause(pCmd, pQueryInfo, &pSqlNode->sliding) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if (parseSlidingClause(pCmd, pQueryInfo, &pQuerySqlNode->sliding) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ bool interpQuery = tscIsPointInterpQuery(pQueryInfo);
+ if ((pSqlNode->interval.token == TK_EVERY && (!interpQuery)) || (pSqlNode->interval.token == TK_INTERVAL && interpQuery)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
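+  // Example: "SELECT INTERP(v) FROM t ... EVERY(1s)" passes this check, while
+  // "SELECT COUNT(*) FROM t ... EVERY(1s)" (EVERY without interpolation) and
+  // "SELECT INTERP(v) FROM t ... INTERVAL(1s)" are both rejected with msg4.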
// The following part is used to check for the invalid query expression.
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
}
+static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
+
+ const char* msg1 = "invalid column name";
+ const char* msg2 = "invalid column type";
+  const char* msg3 = "state_window not supported together with group by";
+  const char* msg4 = "function not supported for super table query";
+  const char* msg5 = "state_window not supported on tag column";
+
+  SStrToken *col = &(pSqlNode->windowstateVal.col);
+ if (col->z == NULL || col->n <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (pQueryInfo->colList == NULL) {
+ pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
+ }
+ if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ pQueryInfo->groupbyExpr.numOfGroupCols = 1;
+
+ //TODO(dengyihao): check tag column
+ if (isStable) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ } else if (index.columnIndex >= numOfCols) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+
+ SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
+ if (pGroupExpr->columnInfo == NULL) {
+ pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
+ }
+
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT
+ || pSchema->type == TSDB_DATA_TYPE_DOUBLE || pSchema->type == TSDB_DATA_TYPE_NCHAR
+ || pSchema->type == TSDB_DATA_TYPE_BINARY) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
+ SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
+ taosArrayPush(pGroupExpr->columnInfo, &colIndex);
+ pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
+ pQueryInfo->stateWindow = true;
+ return TSDB_CODE_SUCCESS;
+}
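+// Example: "SELECT COUNT(*) FROM t STATE_WINDOW(status)" opens a new window
+// whenever the value of the integer column status changes; timestamp, float,
+// double, binary and nchar columns are rejected above with msg2.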
-int32_t parseSessionClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode * pQuerySqlNode) {
+int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pSqlNode) {
const char* msg1 = "gap should be fixed time window";
const char* msg2 = "only one type time window allowed";
const char* msg3 = "invalid column name";
const char* msg4 = "invalid time window";
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
// no session window
- if (!TPARSER_HAS_TOKEN(pQuerySqlNode->sessionVal.gap)) {
+ if (!TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)) {
return TSDB_CODE_SUCCESS;
}
- SStrToken* col = &pQuerySqlNode->sessionVal.col;
- SStrToken* gap = &pQuerySqlNode->sessionVal.gap;
+ SStrToken* col = &pSqlNode->sessionVal.col;
+ SStrToken* gap = &pSqlNode->sessionVal.gap;
char timeUnit = 0;
- if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (timeUnit == 'y' || timeUnit == 'n') {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
-
- // if the unit of time window value is millisecond, change the value from microsecond
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->sessionWindow.gap = pQueryInfo->sessionWindow.gap / 1000;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ if (pQueryInfo->sessionWindow.gap == 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, col, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (getColumnIndexByName(col, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
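+  // Example: "SELECT COUNT(*) FROM t SESSION(ts, 30s)" closes the current
+  // window once two consecutive rows are more than 30s apart; the first
+  // argument must be the primary timestamp column and the gap must be > 0.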
@@ -902,33 +1263,30 @@ int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* of
return TSDB_CODE_SUCCESS;
}
- if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset,
+ &pQueryInfo->interval.offsetUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pQueryInfo->interval.offset < 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
- // if the unit of time window value is millisecond, change the value from microsecond
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
- }
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
} else if (pQueryInfo->interval.offsetUnit == pQueryInfo->interval.intervalUnit) {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pQueryInfo->interval.intervalUnit == 'n' && pQueryInfo->interval.offsetUnit == 'y') {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else if (pQueryInfo->interval.intervalUnit == 'y' && pQueryInfo->interval.offsetUnit == 'n') {
if (pQueryInfo->interval.interval * 12 <= pQueryInfo->interval.offset) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
// TODO: offset should be shorter than interval, but how to check
@@ -949,41 +1307,35 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ SInterval* pInterval = &pQueryInfo->interval;
if (pSliding->n == 0) {
- pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
- pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
+ pInterval->slidingUnit = pInterval->intervalUnit;
+ pInterval->sliding = pInterval->interval;
return TSDB_CODE_SUCCESS;
}
- if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
- pQueryInfo->interval.sliding /= 1000;
- }
+ parseAbsoluteDuration(pSliding->z, pSliding->n, &pInterval->sliding, &pInterval->slidingUnit, tinfo.precision);
- if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (pInterval->sliding < convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
- if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (pInterval->sliding > pInterval->interval) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if ((pInterval->interval != 0) && (pInterval->interval/pInterval->sliding > INTERVAL_SLIDING_FACTOR)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
-// if (pQueryInfo->interval.sliding != pQueryInfo->interval.interval && pSql->pStream == NULL) {
-// return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
-// }
-
return TSDB_CODE_SUCCESS;
}
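+// Example: "INTERVAL(10s) SLIDING(2s)" produces overlapping 10s windows that
+// advance every 2s; a sliding value larger than the interval, or an
+// interval/sliding ratio above INTERVAL_SLIDING_FACTOR, is rejected above.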
-int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
+int32_t tscSetTableFullName(SName* pName, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
const char* msg3 = "no acctId";
@@ -996,52 +1348,53 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableNam
if (idx != -1) { // db has been specified in sql string so we ignore current db path
char* acctId = getAccountId(pSql);
if (acctId == NULL || strlen(acctId) <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
-
- code = tNameSetAcctId(&pTableMetaInfo->name, acctId);
+
+ code = tNameSetAcctId(pName, acctId);
if (code != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- if (idx >= TSDB_DB_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ if (idx >= TSDB_DB_NAME_LEN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
+
if (pTableName->n - 1 - idx >= TSDB_TABLE_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
-
-
+
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
- code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_DB|T_NAME_TABLE);
+ code = tNameFromString(pName, name, T_NAME_DB|T_NAME_TABLE);
if (code != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else { // get current DB name first, and then set it into path
char* t = cloneCurrentDBName(pSql);
if (strlen(t) == 0) {
+ tfree(t);
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
- code = tNameFromString(&pTableMetaInfo->name, t, T_NAME_ACCT | T_NAME_DB);
+ code = tNameFromString(pName, t, T_NAME_ACCT | T_NAME_DB);
if (code != 0) {
- free(t);
+ tfree(t);
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
- free(t);
+ tfree(t);
if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
- code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_TABLE);
+ code = tNameFromString(pName, name, T_NAME_TABLE);
if (code != 0) {
- code = invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ code = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
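+  // Example: for "db1.tb1" the db prefix comes from the statement itself,
+  // while a bare "tb1" inherits the db of the current connection obtained
+  // via cloneCurrentDBName().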
@@ -1058,18 +1411,22 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
const char* msg4 = "invalid data type";
const char* msg5 = "invalid binary/nchar column length";
const char* msg6 = "invalid column name";
+ const char* msg7 = "too many columns";
// number of fields no less than 2
size_t numOfCols = taosArrayGetSize(pFieldList);
- if (numOfCols <= 1 || numOfCols > TSDB_MAX_COLUMNS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ if (numOfCols <= 1 ) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return false;
+ } else if (numOfCols > TSDB_MAX_COLUMNS) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
return false;
}
// first column must be timestamp
TAOS_FIELD* pField = taosArrayGet(pFieldList, 0);
if (pField->type != TSDB_DATA_TYPE_TIMESTAMP) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
@@ -1077,29 +1434,29 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
for (int32_t i = 0; i < numOfCols; ++i) {
pField = taosArrayGet(pFieldList, i);
if (!isValidDataType(pField->type)) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
return false;
}
if (pField->bytes == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
if ((pField->type == TSDB_DATA_TYPE_BINARY && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_BINARY_LEN)) ||
(pField->type == TSDB_DATA_TYPE_NCHAR && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_NCHAR_LEN))) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
if (validateColumnName(pField->name) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
return false;
}
// field name must be unique
if (has(pFieldList, i + 1, pField->name) == true) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
@@ -1108,7 +1465,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
// max row length must be less than TSDB_MAX_BYTES_PER_ROW
if (nLen > TSDB_MAX_BYTES_PER_ROW) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
@@ -1122,7 +1479,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
const char* msg1 = "invalid number of tag columns";
const char* msg2 = "tag length too long";
const char* msg3 = "duplicated column names";
- const char* msg4 = "timestamp not allowed in tags";
+ //const char* msg4 = "timestamp not allowed in tags";
const char* msg5 = "invalid data type in tags";
const char* msg6 = "invalid tag name";
const char* msg7 = "invalid binary/nchar tag length";
@@ -1130,37 +1487,30 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
// number of fields at least 1
size_t numOfTags = taosArrayGetSize(pTagsList);
if (numOfTags < 1 || numOfTags > TSDB_MAX_TAGS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
- /* timestamp in tag is not allowed */
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
-
- if (p->type == TSDB_DATA_TYPE_TIMESTAMP) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
- return false;
- }
-
if (!isValidDataType(p->type)) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
if ((p->type == TSDB_DATA_TYPE_BINARY && p->bytes <= 0) ||
(p->type == TSDB_DATA_TYPE_NCHAR && p->bytes <= 0)) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
return false;
}
if (validateColumnName(p->name) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
return false;
}
if (has(pTagsList, i + 1, p->name) == true) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
}
@@ -1169,7 +1519,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (p->bytes == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
return false;
}
@@ -1178,7 +1528,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
// max tag row length must be less than TSDB_MAX_TAGS_LEN
if (nLen > TSDB_MAX_TAGS_LEN) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
@@ -1187,7 +1537,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (has(pFieldList, 0, p->name) == true) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
}
@@ -1198,40 +1548,40 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
/*
 * tag name / column name is truncated in sql.y
*/
-bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
- const char* msg1 = "timestamp not allowed in tags";
- const char* msg2 = "duplicated column names";
+int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
const char* msg3 = "tag length too long";
const char* msg4 = "invalid tag name";
const char* msg5 = "invalid binary/nchar tag length";
const char* msg6 = "invalid data type in tags";
+ const char* msg7 = "too many columns";
- assert(pCmd->numOfClause == 1);
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfTags = tscGetNumOfTags(pTableMeta);
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
-
+
+  // the total number of tags and columns must not exceed TSDB_MAX_COLUMNS
+ if (numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
+
  // the number of tags must not exceed TSDB_MAX_TAGS
if (numOfTags == TSDB_MAX_TAGS) {
char msg[128] = {0};
sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS);
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
// no timestamp allowable
- if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- return false;
- }
+ //if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) {
+ // invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ // return false;
+ //}
if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@@ -1243,20 +1593,17 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
  // total tag length must not exceed TSDB_MAX_TAGS_LEN
if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
  // tag name can not be a keyword
if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
  // binary(val): val must be greater than 0
if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
// field name must be unique
@@ -1264,24 +1611,22 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
if (strncasecmp(pTagField->name, pSchema[i].name, sizeof(pTagField->name) - 1) == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
}
}
- return true;
+ return TSDB_CODE_SUCCESS;
}
-bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
+int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
const char* msg1 = "too many columns";
- const char* msg2 = "duplicated column names";
const char* msg3 = "column length too long";
const char* msg4 = "invalid data type";
const char* msg5 = "invalid column name";
const char* msg6 = "invalid column length";
- assert(pCmd->numOfClause == 1);
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+// assert(pCmd->numOfClause == 1);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfTags = tscGetNumOfTags(pTableMeta);
@@ -1289,18 +1634,15 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
// no more max columns
if (numOfCols >= TSDB_MAX_COLUMNS || numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_UBIGINT) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
SSchema* pSchema = tscGetTableSchema(pTableMeta);
@@ -1311,25 +1653,23 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
}
if (pColField->bytes <= 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// length less than TSDB_MAX_BYTES_PER_ROW
if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// field name must be unique
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return false;
+ //return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
}
}
- return true;
+ return TSDB_CODE_SUCCESS;
}
/* is contained in pFieldList or not */
@@ -1345,18 +1685,10 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
-static char* cloneCurrentDBName(SSqlObj* pSql) {
- pthread_mutex_lock(&pSql->pTscObj->mutex);
- char *p = strdup(pSql->pTscObj->db);
- pthread_mutex_unlock(&pSql->pTscObj->mutex);
-
- return p;
-}
-
/* length limitation, strstr cannot be applied */
static int32_t getDelimiterIndex(SStrToken* pTableName) {
- for (uint32_t i = 0; i < pTableName->n; ++i) {
- if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
+ for (uint32_t i = 0; i < pTableName->n; ++i) {
+ if (pTableName->z[i] == TS_PATH_DELIMITER[0]) {
return i;
}
}
@@ -1376,7 +1708,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
  /* db name is not specified, the tableName does not include the db name */
if (pDB != NULL) {
if (pDB->n >= TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
memcpy(&fullName[totalLen], pDB->z, pDB->n);
@@ -1390,12 +1722,12 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
/* here we only check the table name length limitation */
if (!tscValidateTableNameLength(tableName->n)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else { // pDB == NULL, the db prefix name is specified in tableName
/* the length limitation includes tablename + dbname + sep */
if (tableName->n >= TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -1411,28 +1743,25 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
fullName[totalLen] = 0;
}
- return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
+ return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_OPERATION;
}
-void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
- SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscColumnListInsert(pQueryInfo->colList, &tsCol);
+void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t tableUid) {
+ SSchema s = {.type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX};
+ tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s);
}
-static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSqlExprItem* pItem) {
+static int32_t handleArithmeticExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t exprIndex, tSqlExprItem* pItem) {
const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables";
const char* msg2 = "invalid arithmetic expression in select clause";
const char* msg3 = "tag columns can not be used in arithmetic expression";
const char* msg4 = "columns from different table mixed up in arithmetic expression";
- // arithmetic function in select clause
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
-
SColumnList columnList = {0};
int32_t arithmeticType = NON_ARITHMEIC_EXPR;
if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t tableIndex = columnList.ids[0].tableIndex;
@@ -1442,19 +1771,19 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
// all columns in arithmetic expression must belong to the same table
for (int32_t f = 1; f < columnList.num; ++f) {
if (columnList.ids[f].tableIndex != tableIndex) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
// expr string is set as the parameter of function
SColumnIndex index = {.tableIndex = tableIndex};
- SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double),
- getNewResColId(pQueryInfo), sizeof(double), false);
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double),
+ getNewResColId(pCmd), sizeof(double), false);
- char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z;
- size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1);
- tstrncpy(pExpr->aliasName, name, len);
+ char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->exprToken.z;
+ size_t len = MIN(sizeof(pExpr->base.aliasName), pItem->pNode->exprToken.n + 1);
+ tstrncpy(pExpr->base.aliasName, name, len);
tExprNode* pNode = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
@@ -1463,7 +1792,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
if (ret != TSDB_CODE_SUCCESS) {
taosArrayDestroy(colList);
tExprTreeDestroy(pNode, NULL);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// check for if there is a tag in the arithmetic express
@@ -1474,7 +1803,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
tExprTreeDestroy(pNode, NULL);
taosArrayDestroy(colList);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -1492,11 +1821,11 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
char* c = tbufGetData(&bw, false);
// set the serialized binary string as the parameter of arithmetic expression
- addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len);
- insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr);
+ tscExprAddParams(&pExpr->base, c, TSDB_DATA_TYPE_BINARY, (int32_t)len);
+ insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->base.aliasName, pExpr);
// add ts column
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
tbufCloseWriter(&bw);
taosArrayDestroy(colList);
@@ -1505,54 +1834,51 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
columnList.num = 0;
columnList.ids[0] = (SColumnIndex) {0, 0};
+ char rawName[TSDB_COL_NAME_LEN] = {0};
char aliasName[TSDB_COL_NAME_LEN] = {0};
- if (pItem->aliasName != NULL) {
- tstrncpy(aliasName, pItem->aliasName, TSDB_COL_NAME_LEN);
- } else {
- int32_t nameLen = MIN(TSDB_COL_NAME_LEN, pItem->pNode->token.n + 1);
- tstrncpy(aliasName, pItem->pNode->token.z, nameLen);
- }
+ getColumnName(pItem, aliasName, rawName, TSDB_COL_NAME_LEN);
insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, aliasName, NULL);
int32_t slot = tscNumOfFields(pQueryInfo) - 1;
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
+ assert(pInfo->pExpr == NULL);
- if (pInfo->pSqlExpr == NULL) {
- SExprInfo* pArithExprInfo = calloc(1, sizeof(SExprInfo));
-
- // arithmetic expression always return result in the format of double float
- pArithExprInfo->bytes = sizeof(double);
- pArithExprInfo->interBytes = sizeof(double);
- pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE;
+ SExprInfo* pExprInfo = calloc(1, sizeof(SExprInfo));
- pArithExprInfo->base.functionId = TSDB_FUNC_ARITHM;
- pArithExprInfo->base.numOfParams = 1;
- pArithExprInfo->base.resColId = getNewResColId(pQueryInfo);
+ // arithmetic expression always return result in the format of double float
+ pExprInfo->base.resBytes = sizeof(double);
+ pExprInfo->base.interBytes = 0;
+ pExprInfo->base.resType = TSDB_DATA_TYPE_DOUBLE;
- int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid);
- if (ret != TSDB_CODE_SUCCESS) {
- tExprTreeDestroy(pArithExprInfo->pExpr, NULL);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
- }
+ pExprInfo->base.functionId = TSDB_FUNC_ARITHM;
+ pExprInfo->base.numOfParams = 1;
+ pExprInfo->base.resColId = getNewResColId(pCmd);
+ strncpy(pExprInfo->base.aliasName, aliasName, tListLen(pExprInfo->base.aliasName));
+ strncpy(pExprInfo->base.token, rawName, tListLen(pExprInfo->base.token));
- pInfo->pArithExprInfo = pArithExprInfo;
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &(pExprInfo->base.uid));
+ if (ret != TSDB_CODE_SUCCESS) {
+ tExprTreeDestroy(pExprInfo->pExpr, NULL);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
+ pInfo->pExpr = pExprInfo;
+
SBufferWriter bw = tbufInitWriter(NULL, false);
TRY(0) {
- exprTreeToBinary(&bw, pInfo->pArithExprInfo->pExpr);
+ exprTreeToBinary(&bw, pInfo->pExpr->pExpr);
} CATCH(code) {
tbufCloseWriter(&bw);
UNUSED(code);
// TODO: other error handling
} END_TRY
- SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
- pFuncMsg->arg[0].argBytes = (int16_t) tbufTell(&bw);
- pFuncMsg->arg[0].argValue.pz = tbufGetData(&bw, true);
- pFuncMsg->arg[0].argType = TSDB_DATA_TYPE_BINARY;
+ SSqlExpr* pSqlExpr = &pInfo->pExpr->base;
+ pSqlExpr->param[0].nLen = (int16_t) tbufTell(&bw);
+ pSqlExpr->param[0].pz = tbufGetData(&bw, true);
+ pSqlExpr->param[0].nType = TSDB_DATA_TYPE_BINARY;
// tbufCloseWriter(&bw); // TODO there is a memory leak
}
@@ -1560,8 +1886,8 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
return TSDB_CODE_SUCCESS;
}
-static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSqlExprItem* pItem) {
- SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex);
+static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSqlExprItem* pItem, int32_t colId) {
+ SExprInfo* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex, colId);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@@ -1569,7 +1895,7 @@ static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumn
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
char* colName = (pItem->aliasName == NULL) ? pSchema->name : pItem->aliasName;
- tstrncpy(pExpr->aliasName, colName, sizeof(pExpr->aliasName));
+ tstrncpy(pExpr->base.aliasName, colName, sizeof(pExpr->base.aliasName));
SColumnList ids = {0};
ids.num = 1;
@@ -1580,15 +1906,15 @@ static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumn
ids.num = 0;
}
- insertResultField(pQueryInfo, startPos, &ids, pExpr->resBytes, (int8_t)pExpr->resType, pExpr->aliasName, pExpr);
+ insertResultField(pQueryInfo, startPos, &ids, pExpr->base.resBytes, (int8_t)pExpr->base.resType, pExpr->base.aliasName, pExpr);
}
-static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
+static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
// primary timestamp column has been added already
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return;
}
}
@@ -1600,8 +1926,8 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
// add the timestamp column into the output columns
SColumnIndex index = {0}; // primary timestamp column info
- int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
- tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL);
+ int32_t numOfCols = (int32_t)tscNumOfExprs(pQueryInfo);
+ tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols);
pSupInfo->visible = false;
@@ -1609,19 +1935,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
-bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
- if (pQueryInfo == NULL) {
- return false;
- }
- if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
- return false;
- }
- if (tscQueryTags(pQueryInfo) && tscSqlExprNumOfExprs(pQueryInfo) == 1){
- return true;
- }
- return false;
-}
-
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
@@ -1637,105 +1950,211 @@ static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
return false;
}
+void genUdfList(SArray* pUdfInfo, tSqlExpr *pNode) {
+ if (pNode == NULL) {
+ return;
+ }
+
+ if (pNode->type == SQL_NODE_EXPR) {
+ genUdfList(pUdfInfo, pNode->pLeft);
+ genUdfList(pUdfInfo, pNode->pRight);
+ return;
+ }
+
+ if (pNode->type == SQL_NODE_SQLFUNCTION) {
+ pNode->functionId = isValidFunction(pNode->Expr.operand.z, pNode->Expr.operand.n);
+ if (pNode->functionId < 0) { // extract all possible user defined function
+ struct SUdfInfo info = {0};
+ info.name = strndup(pNode->Expr.operand.z, pNode->Expr.operand.n);
+ int32_t functionId = (int32_t)taosArrayGetSize(pUdfInfo) * (-1) - 1;
+ info.functionId = functionId;
+
+ taosArrayPush(pUdfInfo, &info);
+ }
+ }
+}
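+// Example: for "SELECT myudf(v) FROM t", the built-in lookup above fails for
+// "myudf", so it is appended to pUdfInfo with a negative id: the first UDF
+// found gets functionId -1, the second -2, and so on.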
+
+/*
+static int32_t checkForUdf(SSqlObj* pSql, SQueryInfo* pQueryInfo, SArray* pSelection) {
+ if (pQueryInfo->pUdfInfo != NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pQueryInfo->pUdfInfo = taosArrayInit(4, sizeof(struct SUdfInfo));
+
+ size_t nExpr = taosArrayGetSize(pSelection);
+
+ for (int32_t i = 0; i < nExpr; ++i) {
+ tSqlExprItem* pItem = taosArrayGet(pSelection, i);
+
+ int32_t type = pItem->pNode->type;
+ if (type == SQL_NODE_EXPR || type == SQL_NODE_SQLFUNCTION) {
+ genUdfList(pQueryInfo->pUdfInfo, pItem->pNode);
+ }
+ }
+
+ if (taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
+ return tscGetUdfFromNode(pSql, pQueryInfo);
+ } else {
+ return TSDB_CODE_SUCCESS;
+ }
+}
+*/
+
+static SUdfInfo* isValidUdf(SArray* pUdfInfo, const char* name, int32_t len) {
+  if (pUdfInfo == NULL) {
+    tscError("udfinfo is null");
+    return NULL;
+  }
+  size_t t = taosArrayGetSize(pUdfInfo);
+  for (int32_t i = 0; i < t; ++i) {
+ SUdfInfo* pUdf = taosArrayGet(pUdfInfo, i);
+ if (strlen(pUdf->name) == len && strncasecmp(pUdf->name, name, len) == 0) {
+ return pUdf;
+ }
+ }
+
+ return NULL;
+}
+
+int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelNodeList, bool joinQuery,
+ bool timeWindowQuery, bool outerQuery) {
+ assert(pSelNodeList != NULL && pCmd != NULL);
-int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectList, bool isSTable, bool joinQuery, bool timeWindowQuery) {
- assert(pSelectList != NULL && pCmd != NULL);
- const char* msg1 = "too many columns in selection clause";
+ const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
+  const char* msg4 = "distinct mixed with projection/aggregation functions is not supported";
  const char* msg5 = "invalid function name";
-  const char* msg6 = "only support distinct one tag";
+  const char* msg6 = "distinct mixed with join is not supported";
+  const char* msg7 = "distinct mixed with group by is not supported";
+  const char* msg8 = "distinct in nested query is not supported";
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
+  // the number of items in the selection clause must not exceed TSDB_MAX_COLUMNS
+ if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
+
bool hasDistinct = false;
- size_t numOfExpr = taosArrayGetSize(pSelectList);
+ bool hasAgg = false;
+ size_t numOfExpr = taosArrayGetSize(pSelNodeList);
+ int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
- int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
- tSqlExprItem* pItem = taosArrayGet(pSelectList, i);
+ int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
+ tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
if (hasDistinct == false) {
hasDistinct = (pItem->distinct == true);
+ distIdx = hasDistinct ? i : -1;
}
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
- pItem->pNode->functionId = isValidFunction(pItem->pNode->operand.z, pItem->pNode->operand.n);
+ hasAgg = true;
+ if (hasDistinct) break;
+
+ pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
+
+ if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+
+ SUdfInfo* pUdfInfo = NULL;
if (pItem->pNode->functionId < 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
+ if (pUdfInfo == NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+
+ pItem->pNode->functionId = pUdfInfo->functionId;
}
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
- if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true, pUdfInfo) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else if (type == SQL_NODE_TABLE_COLUMN || type == SQL_NODE_VALUE) {
// use the dynamic array list to decide if the function is valid or not
- // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
- if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
- }
+ // select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
+ if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem, outerQuery) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
} else if (type == SQL_NODE_EXPR) {
- int32_t code = handleArithmeticExpr(pCmd, clauseIndex, i, pItem);
+ int32_t code = handleArithmeticExpr(pCmd, pQueryInfo, i, pItem);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
+  // TODO(dengyihao): refactor as function
+  // handle distinct func mixed with other func
if (hasDistinct == true) {
- if (!isValidDistinctSql(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ if (distIdx != 0 || hasAgg) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+ if (joinQuery) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+ if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
- pQueryInfo->distinctTag = true;
+ if (pQueryInfo->pDownstream != NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ }
+
+ pQueryInfo->distinct = true;
}
+
+  // if the final result contains only user-defined columns, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
- addPrimaryTsColIntoResult(pQueryInfo);
+ addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
}
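+// Example: "SELECT DISTINCT tag1 FROM st" is accepted, whereas mixing DISTINCT
+// with aggregation (msg4), a join (msg6), GROUP BY (msg7), or using it inside
+// a nested query (msg8) is rejected by the checks above.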
-int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
- int8_t type, char* fieldName, SSqlExpr* pSqlExpr) {
-
- for (int32_t i = 0; i < pIdList->num; ++i) {
- int32_t tableId = pIdList->ids[i].tableIndex;
- STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[tableId];
-
- int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (pIdList->ids[i].columnIndex >= numOfCols) {
+int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pColList, int16_t bytes,
+ int8_t type, char* fieldName, SExprInfo* pSqlExpr) {
+ for (int32_t i = 0; i < pColList->num; ++i) {
+ int32_t tableIndex = pColList->ids[i].tableIndex;
+ STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[tableIndex]->pTableMeta;
+
+ int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ if (pColList->ids[i].columnIndex >= numOfCols) {
continue;
}
-
- tscColumnListInsert(pQueryInfo->colList, &(pIdList->ids[i]));
+
+ uint64_t uid = pTableMeta->id.uid;
+ SSchema* pSchema = tscGetTableSchema(pTableMeta);
+ tscColumnListInsert(pQueryInfo->colList, pColList->ids[i].columnIndex, uid, &pSchema[pColList->ids[i].columnIndex]);
}
TAOS_FIELD f = tscCreateField(type, fieldName, bytes);
SInternalField* pInfo = tscFieldInfoInsert(&pQueryInfo->fieldsInfo, outputIndex, &f);
- pInfo->pSqlExpr = pSqlExpr;
+ pInfo->pExpr = pSqlExpr;
return TSDB_CODE_SUCCESS;
}
-SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) {
+SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex, int32_t colId) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
@@ -1747,42 +2166,40 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tabl
if (functionId == TSDB_FUNC_TAGPRJ) {
index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema);
} else {
index.columnIndex = colIndex;
}
- int16_t colId = getNewResColId(pQueryInfo);
- return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes,
+ return tscExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, 0,
(functionId == TSDB_FUNC_TAGPRJ));
}
-SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
- SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) {
- int16_t colId = getNewResColId(pQueryInfo);
+SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
+ SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag, int16_t colId) {
+ SExprInfo* pExpr = tscExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
+ pColSchema->bytes, colId, 0, TSDB_COL_IS_TAG(flag));
+ tstrncpy(pExpr->base.aliasName, pColSchema->name, sizeof(pExpr->base.aliasName));
+ tstrncpy(pExpr->base.token, pColSchema->name, sizeof(pExpr->base.token));
- SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
- pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
- tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName));
-
- SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex);
+ SColumnList ids = createColumnList(1, pIndex->tableIndex, pIndex->columnIndex);
if (TSDB_COL_IS_TAG(flag)) {
ids.num = 0;
}
insertResultField(pQueryInfo, outputColIndex, &ids, pColSchema->bytes, pColSchema->type, pColSchema->name, pExpr);
- pExpr->colInfo.flag = flag;
+ pExpr->base.colInfo.flag = flag;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
if (TSDB_COL_IS_TAG(flag)) {
- tscColumnListInsert(pTableMetaInfo->tagColList, pIndex);
+ tscColumnListInsert(pTableMetaInfo->tagColList, pIndex->columnIndex, pTableMetaInfo->pTableMeta->id.uid, pColSchema);
}
return pExpr;
}
-static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, int32_t startPos) {
+static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, int32_t startPos, SSqlCmd* pCmd) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
int32_t numOfTotalColumns = 0;
@@ -1798,8 +2215,8 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
}
for (int32_t j = 0; j < numOfTotalColumns; ++j) {
- SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex);
- tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName));
+ SExprInfo* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex, getNewResColId(pCmd));
+ tstrncpy(pExpr->base.aliasName, pSchema[j].name, sizeof(pExpr->base.aliasName));
pIndex->columnIndex = j;
SColumnList ids = {0};
@@ -1812,128 +2229,161 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
return numOfTotalColumns;
}
-int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem) {
- const char* msg0 = "invalid column name";
+int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem, bool outerQuery) {
const char* msg1 = "tag for normal table query is not allowed";
+ const char* msg2 = "invalid column name";
+ const char* msg3 = "tbname not allowed in outer query";
- int32_t startPos = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
- int32_t optr = pItem->pNode->tokenId;
+ int32_t startPos = (int32_t)tscNumOfExprs(pQueryInfo);
+ int32_t tokenId = pItem->pNode->tokenId;
- if (optr == TK_ALL) { // project on all fields
+ if (tokenId == TK_ALL) { // project on all fields
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (getTableIndexByName(&pItem->pNode->columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// all meters columns are required
if (index.tableIndex == COLUMN_INDEX_INITIAL_VAL) { // all table columns are required.
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
index.tableIndex = i;
- int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos);
+ int32_t inc = doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd);
startPos += inc;
}
} else {
- doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos);
+ doAddProjectionExprAndResultFields(pQueryInfo, &index, startPos, pCmd);
}
// add the primary timestamp column even though it is not required by user
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
- } else if (optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) { // simple column projection query
+ STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[index.tableIndex]->pTableMeta;
+ if (pTableMeta->tableType != TSDB_TEMP_TABLE) {
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMeta->id.uid);
+ }
+ } else if (tokenId == TK_STRING || tokenId == TK_INTEGER || tokenId == TK_FLOAT) { // simple column projection query
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
// user-specified constant value as a new result column
index.columnIndex = (pQueryInfo->udColumnId--);
index.tableIndex = 0;
- SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->token, pItem->aliasName);
- SSqlExpr* pExpr =
- tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC);
+ SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->value, &pItem->pNode->exprToken, pItem->aliasName);
+ SExprInfo* pExpr = tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC,
+ getNewResColId(pCmd));
// NOTE: the first parameter is reserved for the tag column id during join query process.
- pExpr->numOfParams = 2;
- tVariantAssign(&pExpr->param[1], &pItem->pNode->value);
- } else if (optr == TK_ID) {
+ pExpr->base.numOfParams = 2;
+ tVariantAssign(&pExpr->base.param[1], &pItem->pNode->value);
+ } else if (tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (getColumnIndexByName(&pItem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- SSchema* colSchema = tGetTbnameColumnSchema();
- tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, colSchema, TSDB_COL_TAG);
- } else if (index.columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
- SSchema colSchema = tGetBlockDistColumnSchema();
- tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG);
+ if (outerQuery) {
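+ // in an outer query, tbname must already be a column produced by the inner query; look it up in the result schema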
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+
+ bool existed = false;
+ SSchema* pSchema = pTableMetaInfo->pTableMeta->schema;
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ if (strncasecmp(pSchema[i].name, TSQL_TBNAME_L, tListLen(pSchema[i].name)) == 0) {
+ existed = true;
+ index.columnIndex = i;
+ break;
+ }
+ }
+
+ if (!existed) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
+ SSchema colSchema = pSchema[index.columnIndex];
+ char name[TSDB_COL_NAME_LEN] = {0};
+ getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1);
+
+ tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN);
+ /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema,
+ TSDB_COL_NORMAL, getNewResColId(pCmd));
+ } else {
+ SSchema colSchema = *tGetTbnameColumnSchema();
+ char name[TSDB_COL_NAME_LEN] = {0};
+ getColumnName(pItem, name, colSchema.name, sizeof(colSchema.name) - 1);
+
+ tstrncpy(colSchema.name, name, TSDB_COL_NAME_LEN);
+ /*SExprInfo* pExpr = */ tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema,
+ TSDB_COL_TAG, getNewResColId(pCmd));
+ }
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
+ addProjectQueryCol(pQueryInfo, startPos, &index, pItem, getNewResColId(pCmd));
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
// add the primary timestamp column even though it is not required by user
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ if (!UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
+ }
} else {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
}
static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc,
- const char* name, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult) {
+ const char* name, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult,
+ SUdfInfo* pUdfInfo) {
const char* msg1 = "not support column types";
- int16_t type = 0;
- int16_t bytes = 0;
- int32_t functionID = cvtFunc.execFuncId;
-
- if (functionID == TSDB_FUNC_SPREAD) {
+ int32_t f = cvtFunc.execFuncId;
+ if (f == TSDB_FUNC_SPREAD) {
int32_t t1 = pSchema->type;
- if (t1 == TSDB_DATA_TYPE_BINARY || t1 == TSDB_DATA_TYPE_NCHAR || t1 == TSDB_DATA_TYPE_BOOL) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (IS_VAR_DATA_TYPE(t1) || t1 == TSDB_DATA_TYPE_BOOL) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return -1;
- } else {
- type = TSDB_DATA_TYPE_DOUBLE;
- bytes = tDataTypes[type].bytes;
}
- } else {
- type = pSchema->type;
- bytes = pSchema->bytes;
}
-
- SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false);
- tstrncpy(pExpr->aliasName, name, tListLen(pExpr->aliasName));
- if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
- pExpr->colInfo.flag |= TSDB_COL_NULL;
+ int16_t resType = 0;
+ int16_t resBytes = 0;
+ int32_t interBufSize = 0;
+
+ getResultDataInfo(pSchema->type, pSchema->bytes, f, 0, &resType, &resBytes, &interBufSize, 0, false, pUdfInfo);
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, pColIndex, resType, resBytes, getNewResColId(pCmd), interBufSize, false);
+ tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName));
+
+ if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != f) {
+ pExpr->base.colInfo.flag |= TSDB_COL_NULL;
}
// set reverse order scan data blocks for last query
- if (functionID == TSDB_FUNC_LAST) {
- pExpr->numOfParams = 1;
- pExpr->param[0].i64 = TSDB_ORDER_DESC;
- pExpr->param[0].nType = TSDB_DATA_TYPE_INT;
+ if (f == TSDB_FUNC_LAST) {
+ pExpr->base.numOfParams = 1;
+ pExpr->base.param[0].i64 = TSDB_ORDER_DESC;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT;
}
// for all queries, the timestamp column needs to be loaded
- SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscColumnListInsert(pQueryInfo->colList, &index);
+ SSchema s = {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP,};
+ tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, pExpr->base.uid, &s);
// if it is not in the final result, do not add it
- SColumnList ids = getColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex);
+ SColumnList ids = createColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex);
if (finalResult) {
- insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->aliasName, pExpr);
+ insertResultField(pQueryInfo, resColIdx, &ids, resBytes, (int8_t)resType, pExpr->base.aliasName, pExpr);
} else {
- tscColumnListInsert(pQueryInfo->colList, &(ids.ids[0]));
+ tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
return TSDB_CODE_SUCCESS;
@@ -1957,28 +2407,43 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
}
} else { // use the user-input result column name
- int32_t len = MIN(pItem->pNode->token.n + 1, TSDB_COL_NAME_LEN);
- tstrncpy(name, pItem->pNode->token.z, len);
+ int32_t len = MIN(pItem->pNode->exprToken.n + 1, TSDB_COL_NAME_LEN);
+ tstrncpy(name, pItem->pNode->exprToken.z, len);
}
}
static void updateLastScanOrderIfNeeded(SQueryInfo* pQueryInfo) {
- if (pQueryInfo->sessionWindow.gap > 0 || tscGroupbyColumn(pQueryInfo)) {
- size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo);
+ if (pQueryInfo->sessionWindow.gap > 0 ||
+ pQueryInfo->stateWindow ||
+ taosArrayGetSize(pQueryInfo->pUpstream) > 0 ||
+ tscGroupbyColumn(pQueryInfo)) {
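+ // force last()/last_dst() to scan in ascending ts order for these query types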
+ size_t numOfExpr = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExpr; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId != TSDB_FUNC_LAST && pExpr->functionId != TSDB_FUNC_LAST_DST) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId != TSDB_FUNC_LAST && pExpr->base.functionId != TSDB_FUNC_LAST_DST) {
continue;
}
- pExpr->numOfParams = 1;
- pExpr->param->i64 = TSDB_ORDER_ASC;
- pExpr->param->nType = TSDB_DATA_TYPE_INT;
+ pExpr->base.numOfParams = 1;
+ pExpr->base.param->i64 = TSDB_ORDER_ASC;
+ pExpr->base.param->nType = TSDB_DATA_TYPE_INT;
}
}
}
-int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) {
+static UNUSED_FUNC void updateFunctionInterBuf(SQueryInfo* pQueryInfo, bool superTable, SUdfInfo* pUdfInfo) {
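+ // recompute each expression's result type/bytes and intermediate buffer size for the given table kind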
+ size_t numOfExpr = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExpr; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+
+ int32_t param = (int32_t)pExpr->base.param[0].i64;
+ getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, pExpr->base.functionId, param, &pExpr->base.resType, &pExpr->base.resBytes,
+ &pExpr->base.interBytes, 0, superTable, pUdfInfo);
+ }
+}
+
+int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult,
+ SUdfInfo* pUdfInfo) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t functionId = pItem->pNode->functionId;
@@ -1987,61 +2452,49 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg3 = "illegal column name";
const char* msg4 = "invalid table name";
const char* msg5 = "parameter is out of range [0, 100]";
- const char* msg6 = "function applied to tags not allowed";
+ const char* msg6 = "functions applied to tags are not allowed";
const char* msg7 = "normal table can not apply this function";
const char* msg8 = "multi-columns selection does not support alias column name";
- const char* msg9 = "diff can no be applied to unsigned numeric type";
- const char* msg10 = "parameter is out of range [1, 100]";
+ const char* msg9 = "diff/derivative can not be applied to unsigned numeric type";
+ const char* msg10 = "derivative duration should be at least 1 second";
+ const char* msg11 = "third parameter in derivative should be 0 or 1";
+ const char* msg12 = "parameter is out of range [1, 100]";
+ const char* msg13 = "parameter list required";
switch (functionId) {
case TSDB_FUNC_COUNT: {
- /* more than one parameter for count() function */
- if (pItem->pNode->pParam != NULL && taosArrayGetSize(pItem->pNode->pParam) != 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ /* count() accepts exactly one parameter */
+ if (pItem->pNode->Expr.paramList != NULL && taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SSqlExpr* pExpr = NULL;
+ SExprInfo* pExpr = NULL;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (pItem->pNode->pParam != NULL) {
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
- SStrToken* pToken = &pParamElem->pNode->colInfo;
- int16_t sqlOptr = pParamElem->pNode->tokenId;
- if ((pToken->z == NULL || pToken->n == 0)
- && (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
- }
- if (sqlOptr == TK_ALL) {
- // select table.*
+ if (pItem->pNode->Expr.paramList != NULL) {
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
+ SStrToken* pToken = &pParamElem->pNode->columnName;
+ int16_t tokenId = pParamElem->pNode->tokenId;
+ if ((pToken->z == NULL || pToken->n == 0) && (TK_INTEGER != tokenId)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
+ // handle count(table.*) as well as integer literals such as count(1) or count(2)
+ if (tokenId == TK_ALL || tokenId == TK_INTEGER) {
// check if the table name is valid or not
- SStrToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->columnName;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
- } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
- char buf[8] = {0};
- int64_t val = -1;
- tVariant* pVariant = &pParamElem->pNode->value;
- if (pVariant->nType == TSDB_DATA_TYPE_BIGINT) {
- tVariantDump(pVariant, buf, TSDB_DATA_TYPE_BIGINT, true);
- val = GET_INT64_VAL(buf);
- }
- if (val == 1) {
- index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
- } else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
- }
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false);
} else {
- // count the number of meters created according to the super table
- if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ // count the number of tables created from the super table
+ if (getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -2054,80 +2507,85 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag);
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, isTag);
}
} else { // count(*) is equivalent to count(primary_timestamp_key)
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
int32_t size = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pCmd), size, false);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
+ memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
+ getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
- SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex);
+ SColumnList list = createColumnList(1, index.tableIndex, index.columnIndex);
if (finalResult) {
int32_t numOfOutput = tscNumOfFields(pQueryInfo);
- insertResultField(pQueryInfo, numOfOutput, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->aliasName, pExpr);
+ insertResultField(pQueryInfo, numOfOutput, &list, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, pExpr->base.aliasName, pExpr);
} else {
- for (int32_t i = 0; i < ids.num; ++i) {
- tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i]));
+ for (int32_t i = 0; i < list.num; ++i) {
+ SSchema* ps = tscGetTableSchema(pTableMetaInfo->pTableMeta);
+ tscColumnListInsert(pQueryInfo->colList, list.ids[i].columnIndex, pTableMetaInfo->pTableMeta->id.uid,
+ &ps[list.ids[i].columnIndex]);
}
}
// the timestamp column may always be needed
if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
}
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_SUM:
case TSDB_FUNC_AVG:
case TSDB_FUNC_RATE:
case TSDB_FUNC_IRATE:
- case TSDB_FUNC_SUM_RATE:
- case TSDB_FUNC_SUM_IRATE:
- case TSDB_FUNC_AVG_RATE:
- case TSDB_FUNC_AVG_IRATE:
case TSDB_FUNC_TWA:
case TSDB_FUNC_MIN:
case TSDB_FUNC_MAX:
case TSDB_FUNC_DIFF:
+ case TSDB_FUNC_DERIVATIVE:
case TSDB_FUNC_STDDEV:
case TSDB_FUNC_LEASTSQR: {
// 1. validate the number of parameters
- if (pItem->pNode->pParam == NULL || (functionId != TSDB_FUNC_LEASTSQR && taosArrayGetSize(pItem->pNode->pParam) != 1) ||
- (functionId == TSDB_FUNC_LEASTSQR && taosArrayGetSize(pItem->pNode->pParam) != 3)) {
- /* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList);
+
+ // no parameters, or a wrong number of parameters for the function
+ if (pItem->pNode->Expr.paramList == NULL ||
+ (functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
+ ((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if ((getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+
+ // functions can not be applied to tags
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta))) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. check if sql function can be applied on this column data type
- pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
- int16_t colType = pSchema->type;
- if (!IS_NUMERIC_TYPE(colType)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- } else if (IS_UNSIGNED_NUMERIC_TYPE(colType) && functionId == TSDB_FUNC_DIFF) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ if (!IS_NUMERIC_TYPE(pSchema->type)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
int16_t resultType = 0;
@@ -2135,100 +2593,129 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
int32_t intermediateResSize = 0;
if (getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize,
- &intermediateResSize, 0, false) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ &intermediateResSize, 0, false, NULL) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// set the first column ts for diff/derivative query
- if (functionId == TSDB_FUNC_DIFF) {
- colIndex += 1;
+ if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
- SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE,
- getNewResColId(pQueryInfo), TSDB_KEYSIZE, false);
-
- SColumnList ids = getColumnList(1, 0, 0);
- insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
- }
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
+ TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
+ tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS_DUMMY].name, sizeof(pExpr->base.aliasName));
- // functions can not be applied to tags
- if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ SColumnList ids = createColumnList(1, 0, 0);
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
- SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
- if (functionId == TSDB_FUNC_LEASTSQR) {
- /* set the leastsquares parameters */
+ if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
char val[8] = {0};
if (tVariantDump(&pParamElem[1].pNode->value, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES);
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES);
memset(val, 0, tListLen(val));
if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES);
+ } else if (functionId == TSDB_FUNC_IRATE) {
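+ // attach the table's time precision as a hidden parameter for irate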
+ int64_t prec = info.precision;
+ tscExprAddParams(&pExpr->base, (char*)&prec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
+ } else if (functionId == TSDB_FUNC_DERIVATIVE) {
+ char val[8] = {0};
+
+ int64_t tickPerSec = 0;
+ if (tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
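+ // scale the parsed duration down to ticks of the table's time precision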
+ if (info.precision == TSDB_TIME_PRECISION_MILLI) {
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
+ } else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
+ tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
+ }
+
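+ // the derivative time window must span at least one second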
+ if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ }
+
+ tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
+ memset(val, 0, tListLen(val));
+
+ if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ int64_t v = *(int64_t*) val;
+ if (v != 0 && v != 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
- addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
}
- SColumnList ids = {0};
- ids.num = 1;
- ids.ids[0] = index;
-
- memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
-
+ SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+
+ memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
+ getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
+
if (finalResult) {
int32_t numOfOutput = tscNumOfFields(pQueryInfo);
- insertResultField(pQueryInfo, numOfOutput, &ids, pExpr->resBytes, (int32_t)pExpr->resType, pExpr->aliasName, pExpr);
+ insertResultField(pQueryInfo, numOfOutput, &ids, pExpr->base.resBytes, (int32_t)pExpr->base.resType,
+ pExpr->base.aliasName, pExpr);
} else {
- for (int32_t i = 0; i < ids.num; ++i) {
- tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i]));
- }
+ assert(ids.num == 1);
+ tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
-
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
+
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_FIRST:
case TSDB_FUNC_LAST:
case TSDB_FUNC_SPREAD:
case TSDB_FUNC_LAST_ROW:
case TSDB_FUNC_INTERP: {
- bool requireAllFields = (pItem->pNode->pParam == NULL);
+ bool requireAllFields = (pItem->pNode->Expr.paramList == NULL);
// NOTE: if the query has a time range condition or a normal column filter, the last_row query is converted into a last query
SConvertFunc cvtFunc = {.originFuncId = functionId, .execFuncId = functionId};
- if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) || (hasNormalColumnFilter(pQueryInfo)))) {
+ if (functionId == TSDB_FUNC_LAST_ROW && ((!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER)) ||
+ (hasNormalColumnFilter(pQueryInfo)) ||
+ taosArrayGetSize(pQueryInfo->pUpstream)>0)) {
cvtFunc.execFuncId = TSDB_FUNC_LAST;
}
if (!requireAllFields) {
- if (taosArrayGetSize(pItem->pNode->pParam) < 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (taosArrayGetSize(pItem->pNode->Expr.paramList) < 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (taosArrayGetSize(pItem->pNode->pParam) > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ if (taosArrayGetSize(pItem->pNode->Expr.paramList) > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
/* in the first/last function, multiple columns can be added to the result set */
- for (int32_t i = 0; i < taosArrayGetSize(pItem->pNode->pParam); ++i) {
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, i);
+ for (int32_t i = 0; i < taosArrayGetSize(pItem->pNode->Expr.paramList); ++i) {
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, i);
if (pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pParamElem->pNode->tokenId == TK_ALL) { // select table.*
- SStrToken tmpToken = pParamElem->pNode->colInfo;
+ SStrToken tmpToken = pParamElem->pNode->columnName;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -2240,42 +2727,42 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SStrToken t = {.z = pSchema[j].name, .n = (uint32_t)strnlen(pSchema[j].name, TSDB_COL_NAME_LEN)};
setResultColName(name, pItem, cvtFunc.originFuncId, &t, true);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index, finalResult) != 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[j], cvtFunc, name, colIndex++, &index,
+ finalResult, pUdfInfo) != 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
} else {
- if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
// functions can not be applied to tags
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
char name[TSDB_COL_NAME_LEN] = {0};
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
- bool multiColOutput = taosArrayGetSize(pItem->pNode->pParam) > 1;
- setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->colInfo, multiColOutput);
+ bool multiColOutput = taosArrayGetSize(pItem->pNode->Expr.paramList) > 1;
+ setResultColName(name, pItem, cvtFunc.originFuncId, &pParamElem->pNode->columnName, multiColOutput);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult) != 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, name, colIndex++, &index, finalResult, pUdfInfo) != 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
-
- return TSDB_CODE_SUCCESS;
+
} else { // select * from xxx
int32_t numOfFields = 0;
// multicolumn selection does not support alias name
if (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
@@ -2289,17 +2776,17 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SStrToken t = {.z = pSchema[i].name, .n = (uint32_t)strnlen(pSchema[i].name, TSDB_COL_NAME_LEN)};
setResultColName(name, pItem, cvtFunc.originFuncId, &t, true);
- if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index, finalResult) != 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, &pSchema[index.columnIndex], cvtFunc, name, colIndex, &index,
+ finalResult, pUdfInfo) != 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
colIndex++;
}
numOfFields += tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
}
-
- return TSDB_CODE_SUCCESS;
}
+ return TSDB_CODE_SUCCESS;
}
case TSDB_FUNC_TOP:
@@ -2307,131 +2794,132 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_PERCT:
case TSDB_FUNC_APERCT: {
// 1. validate the number of parameters
- if (pItem->pNode->pParam == NULL || taosArrayGetSize(pItem->pNode->pParam) != 2) {
+ if (pItem->pNode->Expr.paramList == NULL || taosArrayGetSize(pItem->pNode->Expr.paramList) != 2) {
/* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
if (pParamElem->pNode->tokenId != TK_ID) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. validate the column type
- int16_t colType = pSchema[index.columnIndex].type;
- if (!IS_NUMERIC_TYPE(colType)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (!IS_NUMERIC_TYPE(pSchema->type)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// 3. validate the parameters
if (pParamElem[1].pNode->tokenId == TK_ID) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariant* pVariant = &pParamElem[1].pNode->value;
- int8_t resultType = pSchema[index.columnIndex].type;
- int16_t resultSize = pSchema[index.columnIndex].bytes;
+ int16_t resultType = pSchema->type;
+ int16_t resultSize = pSchema->bytes;
+ int32_t interResult = 0;
- char val[8] = {0};
- SSqlExpr* pExpr = NULL;
-
+ char val[8] = {0};
+
+ SExprInfo* pExpr = NULL;
if (functionId == TSDB_FUNC_PERCT || functionId == TSDB_FUNC_APERCT) {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE, true);
double dp = GET_DOUBLE_VAL(val);
if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- resultSize = sizeof(double);
- resultType = TSDB_DATA_TYPE_DOUBLE;
+ getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, &interResult, 0, false,
+ pUdfInfo);
/*
* sql function transformation
* for dp = 0, it is actually min,
* for dp = 100, it is max,
*/
- tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
colIndex += 1; // the first column is ts
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
- addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false);
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
} else {
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t nTop = GET_INT32_VAL(val);
if (nTop <= 0 || nTop > 100) { // todo use macro
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
// todo REFACTOR
// set the first column ts for top/bottom query
SColumnIndex index1 = {index.tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo),
- TSDB_KEYSIZE, false);
- tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->aliasName));
+ pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pCmd),
+ 0, false);
+ tstrncpy(pExpr->base.aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->base.aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- SColumnList ids = getColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
- insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
+ SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts
- pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
- addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
+ pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false);
+ tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
}
- memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
- getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);
-
- SColumnList ids = getColumnList(1, index.tableIndex, index.columnIndex);
+ memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
+ getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
+
+ // todo refactor: tscColumnListInsert part
+ SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+
if (finalResult) {
- insertResultField(pQueryInfo, colIndex, &ids, resultSize, resultType, pExpr->aliasName, pExpr);
+ insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr);
} else {
- for (int32_t i = 0; i < ids.num; ++i) {
- tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i]));
- }
+ assert(ids.num == 1);
+ tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
return TSDB_CODE_SUCCESS;
- };
+ }
case TSDB_FUNC_TID_TAG: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// no parameters or more than one parameter for function
- if (pItem->pNode->pParam == NULL || taosArrayGetSize(pItem->pNode->pParam) != 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (pItem->pNode->Expr.paramList == NULL || taosArrayGetSize(pItem->pNode->Expr.paramList) != 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->pParam, 0);
+ tSqlExprItem* pParamItem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
tSqlExpr* pParam = pParamItem->pNode;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (getColumnIndexByName(&pParam->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -2440,7 +2928,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (index.columnIndex > 0) {
@@ -2456,12 +2944,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
if (colType == TSDB_DATA_TYPE_BOOL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
+ &pSchema[index.columnIndex]);
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
-
+
SSchema s = {0};
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
s = *tGetTbnameColumnSchema();
@@ -2473,50 +2962,114 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
int16_t type = 0;
int32_t inter = 0;
- int32_t ret = getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
+ int32_t ret = getResultDataInfo(s.type, s.bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0, NULL);
assert(ret == TSDB_CODE_SUCCESS);
s.type = (uint8_t)type;
s.bytes = bytes;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
- tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG);
+ tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG, getNewResColId(pCmd));
return TSDB_CODE_SUCCESS;
}
+
case TSDB_FUNC_BLKINFO: {
// no parameters or more than one parameter for function
- if (pItem->pNode->pParam != NULL && taosArrayGetSize(pItem->pNode->pParam) != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (pItem->pNode->Expr.paramList != NULL && taosArrayGetSize(pItem->pNode->Expr.paramList) != 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- SColumnIndex index = {.tableIndex = 0, .columnIndex = TSDB_BLOCK_DIST_COLUMN_INDEX,};
+ SColumnIndex index = {.tableIndex = 0, .columnIndex = 0,};
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY};
int32_t inter = 0;
int16_t resType = 0;
int16_t bytes = 0;
- getResultDataInfo(TSDB_DATA_TYPE_INT, 4, TSDB_FUNC_BLKINFO, 0, &resType, &bytes, &inter, 0, 0);
- s.bytes = bytes;
- s.type = (uint8_t)resType;
- SSqlExpr* pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, &s, TSDB_COL_TAG);
- pExpr->numOfParams = 1;
- pExpr->param[0].i64 = pTableMetaInfo->pTableMeta->tableInfo.rowSize;
- pExpr->param[0].nType = TSDB_DATA_TYPE_BIGINT;
+ getResultDataInfo(TSDB_DATA_TYPE_INT, 4, TSDB_FUNC_BLKINFO, 0, &resType, &bytes, &inter, 0, 0, NULL);
+
+ SSchema s = {.name = "block_dist", .type = TSDB_DATA_TYPE_BINARY, .bytes = bytes};
+
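+ // block_dist always occupies result slot 0; the table row size is attached as a parameter below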
+ SExprInfo* pExpr =
+ tscExprInsert(pQueryInfo, 0, TSDB_FUNC_BLKINFO, &index, resType, bytes, getNewResColId(pCmd), bytes, 0);
+ tstrncpy(pExpr->base.aliasName, s.name, sizeof(pExpr->base.aliasName));
+
+ SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ insertResultField(pQueryInfo, 0, &ids, bytes, s.type, s.name, pExpr);
+
+ pExpr->base.numOfParams = 1;
+ pExpr->base.param[0].i64 = pTableMetaInfo->pTableMeta->tableInfo.rowSize;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
return TSDB_CODE_SUCCESS;
}
- default:
- return TSDB_CODE_TSC_INVALID_SQL;
+ default: {
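+ // not a built-in function: try to resolve the name as a user-defined function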
+ pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
+ if (pUdfInfo == NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ }
+
+ if (taosArrayGetSize(pItem->pNode->Expr.paramList) <= 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg13);
+ }
+
+ tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
+ if (pParamElem->pNode->tokenId != TK_ID) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ if (getColumnIndexByName(&pParamElem->pNode->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+
+ // functions can not be applied to tags
+ if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+
+ int32_t inter = 0;
+ int16_t resType = 0;
+ int16_t bytes = 0;
+ getResultDataInfo(TSDB_DATA_TYPE_INT, 4, functionId, 0, &resType, &bytes, &inter, 0, false, pUdfInfo);
+
+ SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resType, bytes, getNewResColId(pCmd), inter, false);
+
+ memset(pExpr->base.aliasName, 0, tListLen(pExpr->base.aliasName));
+ getColumnName(pItem, pExpr->base.aliasName, pExpr->base.token, sizeof(pExpr->base.aliasName) - 1);
+
+ SSchema s = {0};
+ s.type = (uint8_t)resType;
+ s.bytes = bytes;
+ s.colId = pExpr->base.colInfo.colId;
+
+ uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
+ SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
+ if (finalResult) {
+ insertResultField(pQueryInfo, colIndex, &ids, pUdfInfo->resBytes, pUdfInfo->resType, pExpr->base.aliasName, pExpr);
+ } else {
+ for (int32_t i = 0; i < ids.num; ++i) {
+ tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
+ }
+ }
+ tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
+ return TSDB_CODE_SUCCESS;
+ }
}
-
+
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// todo refactor
-static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) {
+static SColumnList createColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex) {
assert(num == 1 && tableIndex >= 0);
SColumnList columnList = {0};
@@ -2529,12 +3082,16 @@ static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t column
return columnList;
}
-void getColumnName(tSqlExprItem* pItem, char* resultFieldName, int32_t nameLength) {
+void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength) {
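+ // rawName always receives the raw expression text (callers pass pExpr->base.token); resultFieldName prefers the alias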
+ int32_t len = ((int32_t)pItem->pNode->exprToken.n < nameLength) ? (int32_t)pItem->pNode->exprToken.n : nameLength;
+ strncpy(rawName, pItem->pNode->exprToken.z, len);
+
if (pItem->aliasName != NULL) {
- strncpy(resultFieldName, pItem->aliasName, nameLength);
+ int32_t aliasNameLen = (int32_t) strlen(pItem->aliasName);
+ len = (aliasNameLen < nameLength)? aliasNameLen:nameLength;
+ strncpy(resultFieldName, pItem->aliasName, len);
} else {
- int32_t len = ((int32_t)pItem->pNode->token.n < nameLength) ? (int32_t)pItem->pNode->token.n : nameLength;
- strncpy(resultFieldName, pItem->pNode->token.z, len);
+ strncpy(resultFieldName, rawName, len);
}
}
@@ -2543,16 +3100,7 @@ static bool isTablenameToken(SStrToken* token) {
SStrToken tableToken = {0};
extractTableNameFromToken(&tmpToken, &tableToken);
-
- return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L));
-}
-static bool isTableBlockDistToken(SStrToken* token) {
- SStrToken tmpToken = *token;
- SStrToken tableToken = {0};
-
- extractTableNameFromToken(&tmpToken, &tableToken);
-
- return (strncasecmp(TSQL_BLOCK_DIST, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_BLOCK_DIST_L));
+ return (tmpToken.n == strlen(TSQL_TBNAME_L) && strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0);
}
static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) {
@@ -2577,16 +3125,17 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken
return columnIndex;
}
-int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) {
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
if (isTablenameToken(pToken)) {
pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX;
- } else if (isTableBlockDistToken(pToken)) {
- pIndex->columnIndex = TSDB_BLOCK_DIST_COLUMN_INDEX;
- } else if (strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) {
- pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX;
+ } else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n &&
+ strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) {
+ pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just to keep the runtime happy; the java test case InsertSpecialCharacterJniTest still needs fixing
+ } else if (pToken->n == 0) {
+ pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just to keep the runtime happy; the java test case InsertSpecialCharacterJniTest still needs fixing
} else {
// not specify the table name, try to locate the table index by column name
if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) {
@@ -2595,7 +3144,7 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQu
if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(msg, msg0);
} else {
pIndex->tableIndex = i;
pIndex->columnIndex = colIndex;
@@ -2610,14 +3159,14 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQu
}
if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(msg, msg1);
}
}
if (COLUMN_INDEX_VALIDE(*pIndex)) {
return TSDB_CODE_SUCCESS;
} else {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -2643,7 +3192,7 @@ int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColum
}
if (pIndex->tableIndex < 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
@@ -2654,35 +3203,34 @@ int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
extractTableNameFromToken(pToken, &tableToken);
if (getTableIndexImpl(&tableToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
}
-int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getColumnIndexByName(const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, char* msg) {
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
SStrToken tmpToken = *pToken;
if (getTableIndexByName(&tmpToken, pQueryInfo, pIndex) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex);
+ return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex, msg);
}
int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SSqlCmd* pCmd = &pSql->cmd;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- assert(pCmd->numOfClause == 1);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
pCmd->command = TSDB_SQL_SHOW;
const char* msg1 = "invalid name";
- const char* msg2 = "pattern filter string too long";
+ const char* msg2 = "wildcard string should not exceed %d characters";
const char* msg3 = "database name too long";
const char* msg4 = "invalid ip address";
const char* msg5 = "database name is empty";
@@ -2700,20 +3248,20 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pDbPrefixToken->type != 0) {
if (pDbPrefixToken->n >= TSDB_DB_NAME_LEN) { // db name is too long
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pDbPrefixToken->n <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -2723,26 +3271,28 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pPattern->n = strdequote(pPattern->z);
if (pPattern->n <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (!tscValidateTableNameLength(pCmd->payloadLen)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (pPattern->n > tsMaxWildCardsLen) {
+ char tmp[64] = {0};
+ sprintf(tmp, msg2, tsMaxWildCardsLen);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), tmp);
}
}
} else if (showType == TSDB_MGMT_TABLE_VNODES) {
if (pShowInfo->prefix.type == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "No specified ip of dnode");
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "no dnode ip address specified");
}
// for show vnodes, the payload may carry the dnode's ip address
SStrToken* pDnodeIp = &pShowInfo->prefix;
if (pDnodeIp->n >= TSDB_IPv4ADDR_LEN) { // ip addr is too long
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (!validateIpAddress(pDnodeIp->z, pDnodeIp->n)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
return TSDB_CODE_SUCCESS;
@@ -2758,7 +3308,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
SStrToken* idStr = &(pInfo->pMiscInfo->id);
if (idStr->n > TSDB_KILL_MSG_LEN) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
strncpy(pCmd->payload, idStr->z, idStr->n);
@@ -2770,7 +3320,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
int32_t connId = (int32_t)strtol(connIdStr, NULL, 10);
if (connId <= 0) {
memset(pCmd->payload, 0, strlen(pCmd->payload));
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (killType == TSDB_SQL_KILL_CONNECTION) {
@@ -2781,15 +3331,20 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
if (queryId <= 0) {
memset(pCmd->payload, 0, strlen(pCmd->payload));
if (killType == TSDB_SQL_KILL_QUERY) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
return TSDB_CODE_SUCCESS;
}
+static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
+ SSqlCmd* pCmd = &pSql->cmd;
+ pCmd->command = pInfo->type;
+ return TSDB_CODE_SUCCESS;
+}
bool validateIpAddress(const char* ip, size_t size) {
char tmp[128] = {0}; // buffer to build null-terminated string
assert(size < 128);
@@ -2805,7 +3360,7 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pTableMeta == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
assert(tscGetNumOfTags(pTableMetaInfo->pTableMeta) >= 0);
@@ -2814,25 +3369,25 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) {
int16_t type = 0;
int32_t interBytes = 0;
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
- int16_t functionId = aAggs[pExpr->functionId].stableFuncId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, k);
+ int16_t functionId = aAggs[pExpr->base.functionId].stableFuncId;
- int32_t colIndex = pExpr->colInfo.colIndex;
+ int32_t colIndex = pExpr->base.colInfo.colIndex;
SSchema* pSrcSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, colIndex);
if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) ||
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) ||
- (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
- if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->param[0].i64, &type, &bytes,
- &interBytes, 0, true) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE)) {
+ if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes,
+ &interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- tscSqlExprUpdate(pQueryInfo, k, functionId, pExpr->colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes);
+ tscExprUpdate(pQueryInfo, k, functionId, pExpr->base.colInfo.colIndex, TSDB_DATA_TYPE_BINARY, bytes);
// todo refactor
- pExpr->interBytes = interBytes;
+ pExpr->base.interBytes = interBytes;
}
}
@@ -2847,16 +3402,20 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
return;
}
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->base.colInfo.colIndex);
// the final result size and type are the same as for a query on a single table,
// so set the flag to false here;
int32_t inter = 0;
- int32_t functionId = pExpr->functionId;
+ int32_t functionId = pExpr->base.functionId;
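+ // negative function ids denote user-defined functions; skip them here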
+ if (functionId < 0) {
+ continue;
+ }
+
if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) {
continue;
}
@@ -2868,43 +3427,45 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
} else if (functionId == TSDB_FUNC_STDDEV_DST) {
functionId = TSDB_FUNC_STDDEV;
}
-
- getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes,
- &inter, 0, false);
+
+ getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->base.resType, &pExpr->base.resBytes, &inter,
+ 0, false, NULL);
}
}
bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
- const char* msg1 = "TWA not allowed to apply to super table directly";
- const char* msg2 = "TWA only support group by tbname for super table query";
- const char* msg3 = "function not support for super table query";
+ const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table directly";
+ const char* msg2 = "TWA/Diff/Derivative/Irate only support group by tbname for super table query";
+ const char* msg3 = "functions not support for super table query";
// reject SQL functions not yet supported by super table (metric) queries.
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
+ int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
+ if (functionId < 0) {
+ continue;
+ }
+
if ((aAggs[functionId].status & TSDB_FUNCSTATE_STABLE) == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
}
- if (tscIsTWAQuery(pQueryInfo)) {
+ if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
}
- if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
+ if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
- } else {
- SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
- if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return true;
- }
}
+ } else if (tscIsSessionWindowQuery(pQueryInfo)) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return true;
}
return false;
@@ -2927,47 +3488,77 @@ static bool groupbyTagsOrNull(SQueryInfo* pQueryInfo) {
return true;
}
-static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool twQuery) {
- int32_t startIdx = 0;
-
- size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo);
- assert(numOfExpr > 0);
-
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, startIdx);
-
- // ts function can be simultaneously used with any other functions.
- int32_t functionID = pExpr->functionId;
- if (functionID == TSDB_FUNC_TS || functionID == TSDB_FUNC_TS_DUMMY) {
- startIdx++;
- }
-
- int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
-
- if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || twQuery || !groupbyTagsOrNull(pQueryInfo))) {
+bool groupbyTbname(SQueryInfo* pQueryInfo) {
+ if (pQueryInfo->groupbyExpr.columnInfo == NULL ||
+ taosArrayGetSize(pQueryInfo->groupbyExpr.columnInfo) == 0) {
return false;
}
- // diff function cannot be executed with other function
- // arithmetic function can be executed with other arithmetic functions
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
- for (int32_t i = startIdx + 1; i < size; ++i) {
- SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, i);
+ size_t s = taosArrayGetSize(pQueryInfo->groupbyExpr.columnInfo);
+ for (int32_t i = 0; i < s; i++) {
+ SColIndex* colIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i);
+ if (colIndex->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool twQuery) {
+ int32_t startIdx = 0;
+ int32_t aggUdf = 0;
+ int32_t scalarUdf = 0;
+ int32_t prjNum = 0;
+ int32_t aggNum = 0;
+
+ size_t numOfExpr = tscNumOfExprs(pQueryInfo);
+ assert(numOfExpr > 0);
+
+ int32_t factor = INT32_MAX;
+
+ // the diff function cannot be executed together with other functions;
+ // arithmetic functions can be executed with other arithmetic functions
+ size_t size = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = startIdx; i < size; ++i) {
+ SExprInfo* pExpr1 = tscExprGet(pQueryInfo, i);
+
+ int16_t functionId = pExpr1->base.functionId;
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
+ pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE ? ++aggUdf : ++scalarUdf;
+
+ continue;
+ }
+
+ if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) {
+ ++prjNum;
- int16_t functionId = pExpr1->functionId;
- if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) {
continue;
}
- if (functionId == TSDB_FUNC_PRJ && (pExpr1->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->colInfo.flag))) {
+ if (functionId == TSDB_FUNC_PRJ) {
+ ++prjNum;
+ }
+
+ if (functionId == TSDB_FUNC_PRJ && (pExpr1->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->base.colInfo.flag))) {
continue;
}
- if (functionCompatList[functionId] != factor) {
- return false;
+ if (factor == INT32_MAX) {
+ factor = functionCompatList[functionId];
} else {
- if (factor == -1) { // two functions with the same -1 flag
+ if (functionCompatList[functionId] != factor) {
return false;
+ } else {
+ if (factor == -1) { // two functions with the same -1 flag
+ return false;
+ }
}
}
@@ -2976,42 +3567,62 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
}
}
+ aggNum = (int32_t)size - prjNum - aggUdf - scalarUdf;
+
+ assert(aggNum >= 0);
+
+ if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalarUdf > 0)) {
+ return false;
+ }
+
+ if (scalarUdf > 0 && aggNum > 0) {
+ return false;
+ }
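+ // Illustrative outcomes of the two checks above (udf names are examples):
+ //   select my_agg_udf(c1), count(c1) from t    -> rejected, an aggregate udf must be the only expression
+ //   select my_scalar_udf(c1), max(c1) from t   -> rejected, scalar udfs cannot be mixed with aggregates
+ //   select my_scalar_udf(c1), c2 from t        -> passes these checks (scalar udf with projections)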
+
return true;
}
-int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) {
+int32_t validateGroupbyNode(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) {
const char* msg1 = "too many columns in group by clause";
const char* msg2 = "invalid column name in group by clause";
const char* msg3 = "columns from one table allowed as group by columns";
const char* msg4 = "join query does not support group by";
+ const char* msg5 = "not allowed column type for group by";
+ const char* msg6 = "tags not allowed for table query";
const char* msg7 = "not support group by expression";
- const char* msg8 = "not allowed column type for group by";
- const char* msg9 = "tags not allowed for table query";
+ const char* msg8 = "normal column can only be located at the end of group by clause";
// todo: handle the two-table situation
STableMetaInfo* pTableMetaInfo = NULL;
-
if (pList == NULL) {
return TSDB_CODE_SUCCESS;
}
+ if (pQueryInfo->numOfTables > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
+ if (pGroupExpr->columnInfo == NULL) {
+ pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
+ }
+
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
-
- pQueryInfo->groupbyExpr.numOfGroupCols = (int16_t)taosArrayGetSize(pList);
- if (pQueryInfo->groupbyExpr.numOfGroupCols > TSDB_MAX_TAGS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
- if (pQueryInfo->numOfTables > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ if (pGroupExpr->columnInfo == NULL || pQueryInfo->colList == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- STableMeta* pTableMeta = NULL;
- SSchema* pSchema = NULL;
+ pGroupExpr->numOfGroupCols = (int16_t)taosArrayGetSize(pList);
+ if (pGroupExpr->numOfGroupCols > TSDB_MAX_TAGS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
- int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
+ SSchema *pSchema = NULL;
+ int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
+ int32_t numOfGroupCols = 0;
size_t num = taosArrayGetSize(pList);
for (int32_t i = 0; i < num; ++i) {
@@ -3021,39 +3632,31 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
SStrToken token = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (getColumnIndexByName(&token, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (tableIndex == COLUMN_INDEX_INITIAL_VAL) {
tableIndex = index.tableIndex;
} else if (tableIndex != index.tableIndex) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
- pTableMeta = pTableMetaInfo->pTableMeta;
-
- int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pSchema = tGetTbnameColumnSchema();
} else {
pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
}
- bool groupTag = false;
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols) {
- groupTag = true;
- }
-
- SSqlGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr;
- if (pGroupExpr->columnInfo == NULL) {
- pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex));
- }
-
+ int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
+ bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols);
+
if (groupTag) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
int32_t relIndex = index.columnIndex;
@@ -3062,25 +3665,38 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
}
SColIndex colIndex = { .colIndex = relIndex, .flag = TSDB_COL_TAG, .colId = pSchema->colId, };
+ strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name));
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
index.columnIndex = relIndex;
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pSchema);
} else {
// check that the column type is valid for group by: timestamp, float and double columns are not allowed
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- tscColumnListInsert(pQueryInfo->colList, &index);
+ tscColumnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->id.uid, pSchema);
SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
+ strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name));
+
taosArrayPush(pGroupExpr->columnInfo, &colIndex);
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
+ numOfGroupCols++;
+ }
+ }
- if (i == 0 && num > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
- }
+ // 1. only one normal column is allowed in the group by clause
+ // 2. the normal column can only be located at the end of the group by clause
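+ //    e.g. "group by tag1, col1" is accepted, while "group by col1, tag1" and
+ //    "group by col1, col2" are rejected (column/tag names are illustrative)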
+ if (numOfGroupCols > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
+
+ for(int32_t i = 0; i < num; ++i) {
+ SColIndex* pIndex = taosArrayGet(pGroupExpr->columnInfo, i);
+ if (TSDB_COL_IS_NORMAL_COL(pIndex->flag) && i != num - 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
}
@@ -3089,33 +3705,29 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
}
-static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
- if (pColumn == NULL) {
- return NULL;
- }
-
- int32_t size = pColumn->numOfFilters + 1;
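+// Grow the per-column filter list by one slot and return the zero-initialized
+// tail entry; returns NULL if the realloc fails.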
+static SColumnFilterInfo* addColumnFilterInfo(SColumnFilterList* filterList) {
+ int32_t size = (filterList->numOfFilters) + 1;
- char* tmp = (char*) realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size));
+ char* tmp = (char*) realloc((void*)(filterList->filterInfo), sizeof(SColumnFilterInfo) * (size));
if (tmp != NULL) {
- pColumn->filterInfo = (SColumnFilterInfo*)tmp;
+ filterList->filterInfo = (SColumnFilterInfo*)tmp;
} else {
return NULL;
}
- pColumn->numOfFilters++;
+ filterList->numOfFilters = size;
- SColumnFilterInfo* pColFilterInfo = &pColumn->filterInfo[pColumn->numOfFilters - 1];
+ SColumnFilterInfo* pColFilterInfo = &(filterList->filterInfo[size - 1]);
memset(pColFilterInfo, 0, sizeof(SColumnFilterInfo));
return pColFilterInfo;
}
-static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, STableMeta* pTableMeta, SColumnFilterInfo* pColumnFilter,
+static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t timePrecision, SColumnFilterInfo* pColumnFilter,
int16_t colType, tSqlExpr* pExpr) {
const char* msg = "not supported filter condition";
- tSqlExpr* pRight = pExpr->pRight;
+ tSqlExpr *pRight = pExpr->pRight;
if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
colType = TSDB_DATA_TYPE_BIGINT;
@@ -3127,10 +3739,10 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
return retVal;
}
} else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BIGINT == pRight->value.nType)) {
- STableComInfo tinfo = tscGetTableInfo(pTableMeta);
-
- if ((tinfo.precision == TSDB_TIME_PRECISION_MILLI) && (pRight->flags & (1 << EXPR_FLAG_US_TIMESTAMP))) {
- pRight->value.i64 /= 1000;
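+ // literal timestamps flagged EXPR_FLAG_NS_TIMESTAMP carry nanosecond values;
+ // convert them once to the table's native precision before using them as bounds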
+ if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
+ pRight->value.i64 =
+ convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
+ pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP);
}
}
@@ -3140,13 +3752,33 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (IS_NUMERIC_TYPE(pRight->value.nType)) {
bufLen = 60;
} else {
- bufLen = pRight->value.nLen + 1;
+ /*
+ * an empty string still gets a 2-byte buffer so the allocation
+ * below is never zero-length (keeps the memory sanitizer happy)
+ */
+ if (pRight->value.nLen == 0) {
+ bufLen = pRight->value.nLen + 2;
+ } else {
+ bufLen = pRight->value.nLen + 1;
+ }
}
if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) {
retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false);
// TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
+ } else if (pExpr->tokenId == TK_IN) {
+ tVariant *pVal;
+ if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->Expr.paramList, &pVal, colType, timePrecision)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
+ }
+ pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
+ pColumnFilter->len = pVal->nLen;
+ pColumnFilter->filterstr = 1;
+ memcpy((char *)(pColumnFilter->pz), (char *)(pVal->pz), pVal->nLen);
+
+ tVariantDestroy(pVal);
+ free(pVal);
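+ // e.g. for "WHERE c1 IN (1, 3, 5)" the value list is serialized into a
+ // variant buffer kept in pColumnFilter->pz, and TSDB_RELATION_IN is set
+ // as the relational operator in the token switch below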
+
} else if (colType == TSDB_DATA_TYPE_BINARY) {
pColumnFilter->pz = (int64_t)calloc(1, bufLen * TSDB_NCHAR_SIZE);
pColumnFilter->len = pRight->value.nLen;
@@ -3164,7 +3796,7 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
}
if (retVal != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
switch (pExpr->tokenId) {
@@ -3195,8 +3827,11 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
case TK_NOTNULL:
pColumnFilter->lowerRelOptr = TSDB_RELATION_NOTNULL;
break;
+ case TK_IN:
+ pColumnFilter->lowerRelOptr = TSDB_RELATION_IN;
+ break;
default:
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
return TSDB_CODE_SUCCESS;
@@ -3220,11 +3855,11 @@ typedef struct SCondExpr {
static int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t timePrecision);
static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) {
- SArray* pList = pExpr->pParam;
+ SArray* pList = pExpr->Expr.paramList;
int32_t size = (int32_t) taosArrayGetSize(pList);
if (size <= 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (size > 0) {
@@ -3242,7 +3877,7 @@ static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) {
}
if (pVar->nLen <= 0 || !tscValidateTableNameLength(pVar->nLen)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -3272,8 +3907,9 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
const char* msg1 = "non binary column not support like operator";
const char* msg2 = "binary column not support this operator";
const char* msg3 = "bool column not support this operator";
+ const char* msg4 = "primary key not support this operator";
- SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex);
+ SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex->columnIndex, pTableMeta->id.uid, pSchema);
SColumnFilterInfo* pColFilter = NULL;
/*
@@ -3282,10 +3918,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
*/
if (sqlOptr == TK_AND) {
// this is a new filter condition on this column
- if (pColumn->numOfFilters == 0) {
- pColFilter = addColumnFilterInfo(pColumn);
+ if (pColumn->info.flist.numOfFilters == 0) {
+ pColFilter = addColumnFilterInfo(&pColumn->info.flist);
} else { // update the existed column filter information, find the filter info here
- pColFilter = &pColumn->filterInfo[0];
+ pColFilter = &pColumn->info.flist.filterInfo[0];
}
if (pColFilter == NULL) {
@@ -3293,12 +3929,12 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
}
} else if (sqlOptr == TK_OR) {
// TODO fixme: fails to invalidate the filter expression: "col1 = 1 OR col2 = 2"
- pColFilter = addColumnFilterInfo(pColumn);
+ pColFilter = addColumnFilterInfo(&pColumn->info.flist);
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else { // error;
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
pColFilter->filterstr =
@@ -3310,27 +3946,30 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
- ) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ && pExpr->tokenId != TK_IN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->tokenId == TK_LIKE) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
int32_t t = pExpr->tokenId;
- if (t != TK_EQ && t != TK_NE && t != TK_NOTNULL && t != TK_ISNULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (t != TK_EQ && t != TK_NE && t != TK_NOTNULL && t != TK_ISNULL && t != TK_IN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
- pColumn->colIndex = *pIndex;
+ pColumn->columnIndex = pIndex->columnIndex;
+ pColumn->tableUid = pTableMeta->id.uid;
+ if (pColumn->columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX && pExpr->tokenId == TK_IN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
- int16_t colType = pSchema->type;
-
- return doExtractColumnFilterInfo(pCmd, pQueryInfo, pTableMeta, pColFilter, colType, pExpr);
+ STableComInfo tinfo = tscGetTableInfo(pTableMeta);
+ return doExtractColumnFilterInfo(pCmd, pQueryInfo, tinfo.precision, pColFilter, pSchema->type, pExpr);
}
static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
@@ -3344,8 +3983,8 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
tSqlExpr* pLeft = pTableCond->pLeft;
tSqlExpr* pRight = pTableCond->pRight;
- if (!isTablenameToken(&pLeft->colInfo)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (!isTablenameToken(&pLeft->columnName)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t ret = TSDB_CODE_SUCCESS;
@@ -3354,14 +3993,14 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
ret = tablenameListToString(pRight, sb);
} else if (pTableCond->tokenId == TK_LIKE) {
if (pRight->tokenId != TK_STRING) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
ret = tablenameCondToString(pRight, sb);
}
if (ret != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
return ret;
@@ -3369,8 +4008,10 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t relOptr) {
if (pExpr == NULL) {
+ pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
+ pQueryInfo->onlyHasTagCond &= false;
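+ // onlyHasTagCond is and-accumulated by each condition extractor: "&= true"
+ // leaves the flag untouched when no condition of this kind exists, while
+ // "&= false" clears it once a non-tag condition is present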
if (!tSqlExprIsParentOfLeaf(pExpr)) { // internal node
int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
@@ -3381,8 +4022,8 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq
return getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
} else { // handle leaf node
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return extractColumnFilterInfo(pCmd, pQueryInfo, &index, pExpr, relOptr);
@@ -3392,9 +4033,9 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq
static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
int32_t code = 0;
const char* msg1 = "timestamp required for join tables";
+ const char* msg2 = "only support one join tag for each table";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
- const char* msg5 = "only support one join tag for each table";
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
@@ -3405,13 +4046,13 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (code) {
return code;
}
-
+
return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3421,28 +4062,31 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*leftNode == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
+
(*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*leftNode)->tagColId = pTagSchema1->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
- if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
+ atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
+
+ if (pTableMetaInfo->joinTagNum > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
}
int16_t leftIdx = index.tableIndex;
-
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ if (getColumnIndexByName(&pExpr->pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3452,18 +4096,22 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
if (*rightNode == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
(*rightNode)->uid = pTableMetaInfo->pTableMeta->id.uid;
(*rightNode)->tagColId = pTagSchema2->colId;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) {
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
- if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+ index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMeta);
+ if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
+
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
+ atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
+
+ if (pTableMetaInfo->joinTagNum > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
}
@@ -3471,28 +4119,29 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
int16_t rightIdx = index.tableIndex;
if (pTagSchema1->type != pTagSchema2->type) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if ((*leftNode)->tagJoin == NULL) {
(*leftNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
}
-
+
if ((*rightNode)->tagJoin == NULL) {
(*rightNode)->tagJoin = taosArrayInit(2, sizeof(int16_t));
- }
-
+ }
+
taosArrayPush((*leftNode)->tagJoin, &rightIdx);
taosArrayPush((*rightNode)->tagJoin, &leftIdx);
-
+
pQueryInfo->tagCond.joinInfo.hasJoin = true;
-
+
return TSDB_CODE_SUCCESS;
}
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
if (pExpr == NULL) {
+ pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
@@ -3505,12 +4154,12 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
if (*type == NON_ARITHMEIC_EXPR) {
*type = NORMAL_ARITHMETIC;
} else if (*type == AGG_ARIGHTMEIC) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (getColumnIndexByName(&pExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// timestamp, bool, binary and nchar columns do not support arithmetic, so return an invalid-operation error
@@ -3519,54 +4168,58 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer
if ((pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) || (pSchema->type == TSDB_DATA_TYPE_BOOL) ||
(pSchema->type == TSDB_DATA_TYPE_BINARY) || (pSchema->type == TSDB_DATA_TYPE_NCHAR)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
pList->ids[pList->num++] = index;
} else if ((pExpr->tokenId == TK_FLOAT && (isnan(pExpr->value.dKey) || isinf(pExpr->value.dKey))) ||
pExpr->tokenId == TK_NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
} else if (pExpr->type == SQL_NODE_SQLFUNCTION) {
if (*type == NON_ARITHMEIC_EXPR) {
*type = AGG_ARIGHTMEIC;
} else if (*type == NORMAL_ARITHMETIC) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL};
// sql function list in selection clause.
// Append the sqlExpr into exprList of pQueryInfo structure sequentially
- pExpr->functionId = isValidFunction(pExpr->operand.z, pExpr->operand.n);
+ pExpr->functionId = isValidFunction(pExpr->Expr.operand.z, pExpr->Expr.operand.n);
if (pExpr->functionId < 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false, NULL) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// It is invalid in case of more than one sqlExpr, such as first(ts, k) - last(ts, k)
- int32_t inc = (int32_t) tscSqlExprNumOfExprs(pQueryInfo) - outputIndex;
+ int32_t inc = (int32_t) tscNumOfExprs(pQueryInfo) - outputIndex;
if (inc > 1) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// data types not supported in arithmetic expressions
uint64_t id = -1;
for(int32_t i = 0; i < inc; ++i) {
- SSqlExpr* p1 = tscSqlExprGet(pQueryInfo, i + outputIndex);
- int16_t t = p1->resType;
+ SExprInfo* p1 = tscExprGet(pQueryInfo, i + outputIndex);
+
+ int16_t t = p1->base.resType;
if (t == TSDB_DATA_TYPE_BINARY || t == TSDB_DATA_TYPE_NCHAR || t == TSDB_DATA_TYPE_BOOL || t == TSDB_DATA_TYPE_TIMESTAMP) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (i == 0) {
- id = p1->uid;
- } else if (id != p1->uid) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ id = p1->base.uid;
+ continue;
+ }
+
+ if (id != p1->base.uid) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -3611,7 +4264,7 @@ static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryI
// the expression not from the same table, return error
if (uidLeft != uidRight && uidLeft != 0 && uidRight != 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -3638,11 +4291,15 @@ static bool isValidExpr(tSqlExpr* pLeft, tSqlExpr* pRight, int32_t optr) {
if (pRight == NULL) {
return true;
}
-
+
if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && pRight->tokenId >= TK_BOOL && pRight->tokenId <= TK_BINARY) {
return false;
}
+ if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && (optr == TK_NOTNULL || optr == TK_ISNULL)) {
+ return false;
+ }
+
return true;
}
@@ -3695,14 +4352,14 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr
}
if (pExpr->tokenId != TK_EQ) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &rightIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
@@ -3711,21 +4368,25 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr
SSchema* pLeftSchema = tscGetTableSchema(pLeftMeterMeta->pTableMeta);
int16_t leftType = pLeftSchema[pLeftIndex->columnIndex].type;
+ tscColumnListInsert(pQueryInfo->colList, pLeftIndex->columnIndex, pLeftMeterMeta->pTableMeta->id.uid, &pLeftSchema[pLeftIndex->columnIndex]);
+
STableMetaInfo* pRightMeterMeta = tscGetMetaInfo(pQueryInfo, rightIndex.tableIndex);
SSchema* pRightSchema = tscGetTableSchema(pRightMeterMeta->pTableMeta);
int16_t rightType = pRightSchema[rightIndex.columnIndex].type;
+ tscColumnListInsert(pQueryInfo->colList, rightIndex.columnIndex, pRightMeterMeta->pTableMeta->id.uid, &pRightSchema[rightIndex.columnIndex]);
+
if (leftType != rightType) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
} else if (pLeftIndex->tableIndex == rightIndex.tableIndex) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
return false;
}
// table to table/ super table to super table are allowed
if (UTIL_TABLE_IS_SUPER_TABLE(pLeftMeterMeta) != UTIL_TABLE_IS_SUPER_TABLE(pRightMeterMeta)) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
@@ -3747,7 +4408,7 @@ static bool validTableNameOptr(tSqlExpr* pExpr) {
static int32_t setExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, const char* msg, int32_t parentOptr, char* msgBuf) {
if (*parent != NULL) {
if (parentOptr == TK_OR && msg != NULL) {
- return invalidSqlErrMsg(msgBuf, msg);
+ return invalidOperationMsg(msgBuf, msg);
}
*parent = tSqlExprCreate((*parent), pExpr, parentOptr);
@@ -3763,7 +4424,7 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) {
tSqlExpr* pRight = pExpr->pRight;
if (pRight->tokenId == TK_NULL && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) {
- return invalidSqlErrMsg(msgBuf, msg);
+ return invalidOperationMsg(msgBuf, msg);
}
return TSDB_CODE_SUCCESS;
@@ -3771,20 +4432,22 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) {
// check for like expression
static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
- const char* msg1 = "wildcard string should be less than 20 characters";
+ const char* msg1 = "wildcard string should be less than %d characters";
const char* msg2 = "illegal column name";
tSqlExpr* pLeft = pExpr->pLeft;
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_LIKE) {
- if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) {
- return invalidSqlErrMsg(msgBuf, msg1);
+ if (pRight->value.nLen > tsMaxWildCardsLen) {
+ char tmp[64] = {0};
+ sprintf(tmp, msg1, tsMaxWildCardsLen);
+ return invalidOperationMsg(msgBuf, tmp);
}
SSchema* pSchema = tscGetTableSchema(pTableMeta);
- if ((!isTablenameToken(&pLeft->colInfo)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) {
- return invalidSqlErrMsg(msgBuf, msg2);
+ if ((!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) {
+ return invalidOperationMsg(msgBuf, msg2);
}
}
@@ -3807,8 +4470,8 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
int32_t ret = TSDB_CODE_SUCCESS;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
assert(tSqlExprIsParentOfLeaf(*pExpr));
@@ -3828,9 +4491,10 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return code;
}
- if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
// set join query condition
@@ -3846,15 +4510,15 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
-
+
int16_t leftIdx = index.tableIndex;
- if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if (getColumnIndexByName(&pRight->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex];
@@ -3864,27 +4528,27 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
}
-
+
int16_t rightIdx = index.tableIndex;
if ((*leftNode)->tsJoin == NULL) {
(*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
}
-
+
if ((*rightNode)->tsJoin == NULL) {
(*rightNode)->tsJoin = taosArrayInit(2, sizeof(int16_t));
- }
-
+ }
+
taosArrayPush((*leftNode)->tsJoin, &rightIdx);
taosArrayPush((*rightNode)->tsJoin, &leftIdx);
-
+
/*
- * to release expression, e.g., m1.ts = m2.ts,
+ * Release the expression, e.g., m1.ts = m2.ts,
* since it is only used to set the join query type
*/
tSqlExprDestroy(*pExpr);
} else {
- ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg);
+ ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload);
}
*pExpr = NULL; // remove this expression
@@ -3892,17 +4556,17 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
// query on tags, check for tag query condition
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// in case of the IN operator, keep it in a separate attribute
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (!validTableNameOptr(*pExpr)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pCondExpr->pTableCond == NULL) {
@@ -3910,7 +4574,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
pCondExpr->relType = parentOptr;
pCondExpr->tableCondIndex = index.tableIndex;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
*type = TSQL_EXPR_TBNAME;
@@ -3918,11 +4582,11 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
} else {
if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query
if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
- ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
+ ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL;
} else {
// do nothing
@@ -3937,10 +4601,10 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
*type = TSQL_EXPR_COLUMN;
if (pRight->tokenId == TK_ID) { // other column cannot be served as the join column
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg);
+ ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL; // remove it from expr tree
}
@@ -3956,14 +4620,14 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
const char* msg1 = "query condition between different columns must use 'AND'";
if ((*pExpr)->flags & (1 << EXPR_FLAG_TS_ERROR)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
tSqlExpr* pLeft = (*pExpr)->pLeft;
tSqlExpr* pRight = (*pExpr)->pRight;
if (!isValidExpr(pLeft, pRight, (*pExpr)->tokenId)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t leftType = -1;
@@ -3986,7 +4650,7 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
*/
if (leftType != rightType) {
if ((*pExpr)->tokenId == TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -3997,11 +4661,11 @@ int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr
exchangeExpr(*pExpr);
if (pLeft->tokenId == TK_ID && pRight->tokenId == TK_TIMESTAMP && (pRight->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR))) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if ((pLeft->flags & (1 << EXPR_FLAG_TS_ERROR)) || (pRight->flags & (1 << EXPR_FLAG_TS_ERROR))) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, parentOptr);
@@ -4012,7 +4676,7 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSqlExpr** pExpr, SQueryInfo*
tSqlExpr* pLeft = (*pExpr)->pLeft;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(&pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return;
}
@@ -4113,7 +4777,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
taosStringBuilderDestroy(&sb1);
tfree(segments);
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
return ret;
}
@@ -4137,8 +4801,8 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < num; ++i) {
SColumn* pCol = taosArrayGetP(pColList, i);
- for (int32_t j = 0; j < pCol->numOfFilters; ++j) {
- SColumnFilterInfo* pColFilter = &pCol->filterInfo[j];
+ for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) {
+ SColumnFilterInfo* pColFilter = &pCol->info.flist.filterInfo[j];
int32_t lowerOptr = pColFilter->lowerRelOptr;
int32_t upperOptr = pColFilter->upperRelOptr;
@@ -4161,26 +4825,29 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
const char* msg0 = "invalid timestamp";
const char* msg1 = "only one time stamp window allowed";
int32_t code = 0;
-
+
if (pExpr == NULL) {
+ pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
}
+ pQueryInfo->onlyHasTagCond &= false;
+
if (!tSqlExprIsParentOfLeaf(pExpr)) {
if (pExpr->tokenId == TK_OR) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
code = getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
if (code) {
return code;
}
-
+
return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (getColumnIndexByName(&pExpr->pLeft->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -4190,7 +4857,7 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (getTimeRange(&win, pRight, pExpr->tokenId, tinfo.precision) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
// update the timestamp query range
@@ -4213,22 +4880,24 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
if (pQueryInfo->numOfTables == 1) {
+ pQueryInfo->onlyHasTagCond &= true;
return TSDB_CODE_SUCCESS;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
+ pQueryInfo->onlyHasTagCond &= false;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns
// must be present for join
if (pCondExpr->pJoinExpr == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (!pCondExpr->tsJoin) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -4239,10 +4908,6 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
tSqlExprDestroy(pCondExpr->pTableCond);
}
- if (pCondExpr->pTagCond) {
- tSqlExprDestroy(pCondExpr->pTagCond);
- }
-
if (pCondExpr->pColumnCond) {
tSqlExprDestroy(pCondExpr->pColumnCond);
}
@@ -4262,21 +4927,25 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (left)", pQueryInfo);
}
+
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
-
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+
+ SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+ tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]);
- if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->ColName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (right)", pQueryInfo);
}
+
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
-
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+
+ pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+ tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]);
}
}
*/
@@ -4295,7 +4964,7 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
}
if (IS_ARITHMETIC_OPTR(p->_node.optr)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (!IS_RELATION_OPTR(p->_node.optr)) {
@@ -4353,7 +5022,7 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
}
if (retVal != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} while (0);
@@ -4387,7 +5056,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
// TODO: more error handling
} END_TRY
- // add to source column list
+ // add to required table column list
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
int64_t uid = pTableMetaInfo->pTableMeta->id.uid;
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@@ -4396,7 +5065,10 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
for(int32_t j = 0; j < num; ++j) {
SColIndex* pIndex = taosArrayGet(colList, j);
SColumnIndex index = {.tableIndex = i, .columnIndex = pIndex->colIndex - numOfCols};
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+
+ SSchema* s = tscGetTableSchema(pTableMetaInfo->pTableMeta);
+ tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMetaInfo->pTableMeta->id.uid,
+ &s[pIndex->colIndex]);
}
tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw);
@@ -4411,7 +5083,7 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
taosArrayDestroy(colList);
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0 && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "filter on tag not supported for normal table");
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "filter on tag not supported for normal table");
}
if (ret) {
@@ -4426,22 +5098,22 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "timestamp required for join tables";
const char* msg2 = "tag required for join stables";
-
+
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
-
+ SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
+
if (node == NULL || node->tsJoin == NULL || taosArrayGetSize(node->tsJoin) <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
-
+ SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i];
+
if (node == NULL || node->tagJoin == NULL || taosArrayGetSize(node->tagJoin) <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
}
@@ -4451,12 +5123,12 @@ int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t type) {
- SJoinNode *node = nodes[*tidx];
+ SJoinNode *node = nodes[*tidx];
SArray* arr = (type == 0) ? node->tsJoin : node->tagJoin;
size_t size = taosArrayGetSize(arr);
p[*tidx] = 1;
-
+
for (int32_t j = 0; j < size; j++) {
int16_t* idx = taosArrayGet(arr, j);
r[*idx] = 1;
@@ -4469,48 +5141,48 @@ void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes,
int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
const char* msg1 = "not all join tables have same timestamp";
const char* msg2 = "not all join tables have same tag";
-
+
int8_t r[TSDB_MAX_JOIN_TABLE_NUM] = {0};
int8_t p[TSDB_MAX_JOIN_TABLE_NUM] = {0};
-
+
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 0);
-
+
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin);
-
+
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin, &j);
}
}
-
+
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tsJoin) != pQueryInfo->numOfTables) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
-
+
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) {
mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 1);
-
+
taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin);
-
+
for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) {
if (r[j]) {
taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin, &j);
}
}
-
+
memset(r, 0, sizeof(r));
memset(p, 0, sizeof(p));
}
if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJoin) != pQueryInfo->numOfTables) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
}
@@ -4519,11 +5191,11 @@ int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) {
}
-int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) {
+int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
-
+
const char* msg1 = "invalid expression";
const char* msg2 = "invalid filter expression";
@@ -4534,12 +5206,12 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
SCondExpr condExpr = {0};
if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
int32_t type = 0;
if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->tokenId)) != TSDB_CODE_SUCCESS) {
- return ret;
+ goto PARSE_WHERE_EXIT;
}
tSqlExprCompact(pExpr);
@@ -4549,13 +5221,14 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
// 1. check if it is a join query
if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
- return ret;
+ goto PARSE_WHERE_EXIT;
}
// 2. get the query time range
if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
return ret;
}
+
// 3. get the tag query condition
if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
@@ -4572,6 +5245,12 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
goto PARSE_WHERE_EXIT;
}
+ if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pTimewindow, TK_AND)) != TSDB_CODE_SUCCESS) {
+ goto PARSE_WHERE_EXIT;
+ }
+ }
+
// 6. join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
@@ -4587,7 +5266,7 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
}
if (!validateFilterExpr(pQueryInfo)) {
- ret = invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
+ ret = invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
goto PARSE_WHERE_EXIT;
}
@@ -4601,7 +5280,7 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql
ret = mergeJoinNodes(pQueryInfo, pSql);
if (ret) {
goto PARSE_WHERE_EXIT;
- }
+ }
}
PARSE_WHERE_EXIT:
@@ -4621,7 +5300,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
* where ts in ('2015-12-12 4:8:12')
*/
if (pRight->tokenId == TK_SET || optr == TK_IN) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int64_t val = 0;
@@ -4631,31 +5310,19 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
char* seg = strnchr(pRight->value.pz, '-', pRight->value.nLen, false);
if (seg != NULL) {
- if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, TSDB_TIME_PRECISION_MICRO, tsDaylight) == TSDB_CODE_SUCCESS) {
+ if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, timePrecision, tsDaylight) == TSDB_CODE_SUCCESS) {
parsed = true;
} else {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
SStrToken token = {.z = pRight->value.pz, .n = pRight->value.nLen, .type = TK_ID};
int32_t len = tGetToken(pRight->value.pz, &token.type);
if ((token.type != TK_INTEGER && token.type != TK_FLOAT) || len != pRight->value.nLen) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
- } else if (pRight->tokenId == TK_INTEGER && timePrecision == TSDB_TIME_PRECISION_MILLI) {
- /*
- * if the pRight->tokenId == TK_INTEGER/TK_FLOAT, the value is adaptive, we
- * need the time precision in metermeta to transfer the value in MICROSECOND
- *
- * Additional check to avoid data overflow
- */
- if (pRight->value.i64 <= INT64_MAX / 1000) {
- pRight->value.i64 *= 1000;
- }
- } else if (pRight->tokenId == TK_FLOAT && timePrecision == TSDB_TIME_PRECISION_MILLI) {
- pRight->value.dKey *= 1000;
}
if (!parsed) {
@@ -4663,39 +5330,26 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
   * failed to parse the timestamp in regular format, try next:
   * it may be an epoch time in string format
*/
- tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);
-
- /*
- * transfer it into MICROSECOND format if it is a string, since for
- * TK_INTEGER/TK_FLOAT the value has been transferred
- *
- * additional check to avoid data overflow
- */
- if (pRight->tokenId == TK_STRING && timePrecision == TSDB_TIME_PRECISION_MILLI) {
- if (val <= INT64_MAX / 1000) {
- val *= 1000;
- }
+ if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
+ pRight->value.i64 = convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
+ pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP);
}
- }
- int32_t delta = 1;
- /* for millisecond, delta is 1ms=1000us */
- if (timePrecision == TSDB_TIME_PRECISION_MILLI) {
- delta *= 1000;
+ tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);
}
if (optr == TK_LE) {
win->ekey = val;
} else if (optr == TK_LT) {
- win->ekey = val - delta;
+ win->ekey = val - 1;
} else if (optr == TK_GT) {
- win->skey = val + delta;
+ win->skey = val + 1;
} else if (optr == TK_GE) {
win->skey = val;
} else if (optr == TK_EQ) {
win->ekey = win->skey = val;
} else if (optr == TK_NE) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
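
/*
 * Illustrative sketch (not from the patch) of the precision handling assumed
 * above. Timestamp literals from the parser are tracked in nanoseconds and
 * flagged with EXPR_FLAG_NS_TIMESTAMP, then rescaled to the table's native
 * precision before being folded into the window, which is why TK_LT/TK_GT can
 * now shift by exactly one tick. The helper name and scale factors are
 * hypothetical; the real conversion is done by convertTimePrecision().
 */
static int64_t nsToNativePrecision(int64_t nsVal, int32_t precision) {
  switch (precision) {
    case TSDB_TIME_PRECISION_NANO:  return nsVal;            // 1:1
    case TSDB_TIME_PRECISION_MICRO: return nsVal / 1000;     // ns -> us
    default:                        return nsVal / 1000000;  // ns -> ms
  }
}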
@@ -4725,7 +5379,7 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutput; ++j) {
if (strncasecmp(fieldName, tscFieldInfoGetField(&pQueryInfo->fieldsInfo, j)->name, (TSDB_COL_NAME_LEN - 1)) == 0) {
const char* msg = "duplicated column name in new table";
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
}
@@ -4733,25 +5387,45 @@ int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySQL) {
- SArray* pFillToken = pQuerySQL->fillType;
+int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
+ SArray* pFillToken = pSqlNode->fillType;
+ if (pSqlNode->fillType == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
tVariantListItem* pItem = taosArrayGet(pFillToken, 0);
const int32_t START_INTERPO_COL_IDX = 1;
- const char* msg = "illegal value or data overflow";
const char* msg1 = "value is expected";
const char* msg2 = "invalid fill option";
const char* msg3 = "top/bottom not support fill";
+ const char* msg4 = "illegal value or data overflow";
+ const char* msg5 = "fill only available for interval query";
+ const char* msg6 = "not supported function now";
+
+ if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ }
+
+ /*
+   * fill options are handled at the end, once all columns have been set properly;
+   * the number of columns may have increased due to the group by operation
+ */
+ if (checkQueryRangeForFill(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t numOfFields = tscNumOfFields(pQueryInfo);
if (pQueryInfo->fillVal == NULL) {
- pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t));
+ pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t));
+ pQueryInfo->numOfFillVal = (int32_t)numOfFields;
if (pQueryInfo->fillVal == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -4767,6 +5441,9 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ
}
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV;
+ if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@@ -4776,7 +5453,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ
size_t num = taosArrayGetSize(pFillToken);
if (num == 1) { // no actual value, return with error code
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t startPos = 1;
@@ -4806,7 +5483,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ
tVariant* p = taosArrayGet(pFillToken, j);
int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
@@ -4824,14 +5501,14 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQ
}
}
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
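
/*
 * Illustrative queries for the fill checks above (standard TDengine fill
 * syntax assumed; not from the patch):
 *
 *   SELECT count(*) FROM st WHERE ts > now-1h INTERVAL(10s) FILL(VALUE, 0);  -- ok
 *   SELECT count(*) FROM st WHERE ts > now-1h INTERVAL(10s) FILL(VALUE);     -- msg1, value missing
 *   SELECT count(*) FROM st WHERE ts > now-1h FILL(PREV);                    -- msg5, no interval
 *   SELECT top(c1, 3) FROM st WHERE ts > now-1h INTERVAL(10s) FILL(NONE);    -- msg3, top/bottom
 */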
@@ -4845,36 +5522,37 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
if (isTopBottomQuery(pQueryInfo)) {
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column
- pQueryInfo->order.orderColId = INT32_MIN;
+ } else { // in case of select tbname from super_table, the default order column can not be the primary ts column
+ pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro
}
/* for super table query, set default ascending order for group output */
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
}
+
+ if (pQueryInfo->distinct) {
+ pQueryInfo->order.order = TSDB_ORDER_ASC;
+ pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
+ }
}
-int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode, SSchema* pSchema) {
+int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) {
const char* msg0 = "only support order by primary timestamp";
const char* msg1 = "invalid column name";
- const char* msg2 = "order by primary timestamp or first tag in groupby clause allowed";
+ const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed";
const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed";
+ const char* msg4 = "orderby column must projected in subquery";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- if (pQueryInfo->distinctTag == true) {
- pQueryInfo->order.order = TSDB_ORDER_ASC;
- pQueryInfo->order.orderColId = 0;
- return TSDB_CODE_SUCCESS;
- }
- if (pQuerySqlNode->pSortOrder == NULL) {
+ if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
- SArray* pSortorder = pQuerySqlNode->pSortOrder;
+ char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
+ SArray* pSortOrder = pSqlNode->pSortOrder;
/*
   * for a table query, only one order option (or none) is allowed, which is the
@@ -4882,19 +5560,19 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode*
*
   * for a super table query, the number of order options must be less than 3.
*/
- size_t size = taosArrayGetSize(pSortorder);
- if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+ size_t size = taosArrayGetSize(pSortOrder);
+ if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
if (size > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(pMsgBuf, msg0);
}
} else {
if (size > 2) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(pMsgBuf, msg3);
}
}
// handle the first part of order by
- tVariant* pVar = taosArrayGet(pSortorder, 0);
+ tVariant* pVar = taosArrayGet(pSortOrder, 0);
  // e.g., order by 1 asc, return directly without further check.
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
@@ -4905,19 +5583,20 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode*
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
- if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
}
bool orderByTags = false;
bool orderByTS = false;
+ bool orderByGroupbyCol = false;
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(pMsgBuf, msg2);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@@ -4931,99 +5610,168 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode*
orderByTS = true;
}
- if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
+ if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
+ SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
+ if (PRIMARYKEY_TIMESTAMP_COL_INDEX != index.columnIndex && pColIndex->colIndex == index.columnIndex) {
+ orderByGroupbyCol = true;
+ }
+ }
+
+ if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) {
+ return invalidOperationMsg(pMsgBuf, msg3);
} else { // order by top/bottom result value column is not supported in case of interval query.
- assert(!(orderByTags && orderByTS));
+ assert(!(orderByTags && orderByTS && orderByGroupbyCol));
}
- size_t s = taosArrayGetSize(pSortorder);
+ size_t s = taosArrayGetSize(pSortOrder);
if (s == 1) {
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
+ } else if (orderByGroupbyCol) {
+ tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
+
+ pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
+ pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) {
+ int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo);
+ assert(topBotIndex >= 1);
/* order of top/bottom query in interval is not valid */
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
- assert(pExpr->functionId == TSDB_FUNC_TS);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1);
+ assert(pExpr->base.functionId == TSDB_FUNC_TS);
- pExpr = tscSqlExprGet(pQueryInfo, 1);
- if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ pExpr = tscExprGet(pQueryInfo, topBotIndex);
+ if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ return invalidOperationMsg(pMsgBuf, msg2);
}
- tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
return TSDB_CODE_SUCCESS;
} else {
- tVariantListItem* p1 = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
// orderby ts query on super table
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
- addPrimaryTsColIntoResult(pQueryInfo);
+ bool found = false;
+ for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ found = true;
+ break;
+ }
+ }
+ if (!found && pQueryInfo->pDownstream) {
+ return invalidOperationMsg(pMsgBuf, msg4);
+ }
+ addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
}
- }
-
- if (s == 2) {
- tVariantListItem *pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ } else {
+ tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
pQueryInfo->groupbyExpr.orderType = pItem->sortOrder;
+ } else if (orderByGroupbyCol){
+ pQueryInfo->order.order = pItem->sortOrder;
+ pQueryInfo->order.orderColId = index.columnIndex;
} else {
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
- pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 1);
+ pItem = taosArrayGet(pSqlNode->pSortOrder, 1);
tVariant* pVar2 = &pItem->pVar;
SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
- if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(pMsgBuf, msg2);
} else {
- tVariantListItem* p1 = taosArrayGet(pSortorder, 1);
+ tVariantListItem* p1 = taosArrayGet(pSortOrder, 1);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
- } else { // meter query
- if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ bool validOrder = false;
+ SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
+ if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
+ SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
+ validOrder = (pColIndex->colIndex == index.columnIndex);
+ }
+
+ if (!validOrder) {
+ return invalidOperationMsg(pMsgBuf, msg2);
+ }
+
+ tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
+ pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
+ pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
}
if (isTopBottomQuery(pQueryInfo)) {
- /* order of top/bottom query in interval is not valid */
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
- assert(pExpr->functionId == TSDB_FUNC_TS);
+ bool validOrder = false;
+ SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
+ if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
+ SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
+ validOrder = (pColIndex->colIndex == index.columnIndex);
+ } else {
+ int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo);
+ assert(topBotIndex >= 1);
+ /* order of top/bottom query in interval is not valid */
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1);
+ assert(pExpr->base.functionId == TSDB_FUNC_TS);
- pExpr = tscSqlExprGet(pQueryInfo, 1);
- if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ pExpr = tscExprGet(pQueryInfo, topBotIndex);
+ if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ return invalidOperationMsg(pMsgBuf, msg2);
+ }
+
+ validOrder = true;
+ }
+
+ if (!validOrder) {
+ return invalidOperationMsg(pMsgBuf, msg2);
}
- tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
+
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
return TSDB_CODE_SUCCESS;
}
- tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->pSortOrder, 0);
+ tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
+ pQueryInfo->order.order = pItem->sortOrder;
+ pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
+ } else {
+    // Handle the order by clause of a temp table created by an inner subquery;
+    // any single column of the subquery output can be used for ordering.
+ assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1);
+
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
+ }
+
+ tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
+ pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
}
return TSDB_CODE_SUCCESS;
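
/*
 * Editorial summary of what validateOrderbyNode() accepts after this change:
 *   super table : the first group-by tag, a group-by column, or the primary
 *                 ts (with ts optionally as a second sort key);
 *   normal/child: the primary ts, the group-by column, or the top/bottom
 *                 output column;
 *   temp table  : any single column projected by the inner subquery.
 */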
@@ -5054,20 +5802,23 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg18 = "primary timestamp column cannot be dropped";
const char* msg19 = "invalid new tag name";
const char* msg20 = "table is not super table";
+ const char* msg21 = "only binary/nchar column length could be modified";
+ const char* msg23 = "only column length coulbe be modified";
+ const char* msg24 = "invalid binary/nchar column length";
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
SAlterTableInfo* pAlterSQL = pInfo->pAlterInfo;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- code = tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql);
+ code = tscSetTableFullName(&pTableMetaInfo->name, &(pAlterSQL->name), pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -5077,63 +5828,65 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return code;
}
+ char* pMsg = tscGetErrorMsgPayload(pCmd);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg20);
+ return invalidOperationMsg(pMsg, msg20);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
- pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
- if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+ if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ return invalidOperationMsg(pMsg, msg3);
}
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
- } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
+ return invalidOperationMsg(pMsg, msg4);
+ } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) &&
UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(pMsg, msg6);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
SArray* pFieldList = pAlterSQL->pAddColumns;
if (taosArrayGetSize(pFieldList) > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(pMsg, msg5);
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
- if (!validateOneTags(pCmd, p)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ int32_t ret = validateOneTag(pCmd, p);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
}
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
if (tscGetNumOfTags(pTableMeta) == 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(pMsg, msg7);
}
// numOfTags == 1
if (taosArrayGetSize(pAlterSQL->varList) > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ return invalidOperationMsg(pMsg, msg8);
}
tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0);
if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ return invalidOperationMsg(pMsg, msg9);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (getColumnIndexByName(&name, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex < numOfCols) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(pMsg, msg10);
} else if (index.columnIndex == numOfCols) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11);
+ return invalidOperationMsg(pMsg, msg11);
}
char name1[128] = {0};
@@ -5144,31 +5897,31 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
SArray* pVarList = pAlterSQL->varList;
if (taosArrayGetSize(pVarList) > 2) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
tVariantListItem* pSrcItem = taosArrayGet(pAlterSQL->varList, 0);
tVariantListItem* pDstItem = taosArrayGet(pAlterSQL->varList, 1);
if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ return invalidOperationMsg(pMsg, msg9);
}
if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
+ return invalidOperationMsg(pMsg, msg10);
}
SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER;
SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER;
SStrToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
SStrToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg19);
+ if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex, tscGetErrorMsgPayload(pCmd)) == TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg19);
}
tVariantListItem* pItem = taosArrayGet(pVarList, 0);
@@ -5195,20 +5948,24 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = item->pVar.pz, .n = item->pVar.nLen};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMeta)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg12);
+ return invalidOperationMsg(pMsg, msg12);
}
tVariantListItem* pItem = taosArrayGet(pVarList, 1);
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+ if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) {
+ return invalidOperationMsg(pMsg, msg14);
+ }
pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13);
+ return invalidOperationMsg(pMsg, msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
@@ -5216,14 +5973,14 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
varDataTLen(pAlterSQL->tagData.data) > pTagsSchema->bytes) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg14);
+ return invalidOperationMsg(pMsg, msg14);
}
int32_t schemaLen = sizeof(STColumn) * numOfTags;
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
+ tscError("0x%"PRIx64" failed to malloc for alter table pMsg", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -5238,7 +5995,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pUpdateMsg->numOfTags = htons(numOfTags);
pUpdateMsg->schemaLen = htonl(schemaLen);
- // the schema is located after the msg body, then followed by true tag value
+  // the schema is located after the message body, followed by the actual tag value
char* d = pUpdateMsg->data;
SSchema* pTagCols = tscGetTableTagSchema(pTableMeta);
for (int i = 0; i < numOfTags; ++i) {
@@ -5251,7 +6008,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
d += sizeof(STColumn);
}
- // copy the tag value to msg body
+    // copy the tag value into the message body
pItem = taosArrayGet(pVarList, 1);
tVariantDump(&pItem->pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true);
@@ -5270,62 +6027,167 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
SArray* pFieldList = pAlterSQL->pAddColumns;
if (taosArrayGetSize(pFieldList) > 1) {
- const char* msg = "only support add one column";
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ const char* msgx = "only support add one column";
+ return invalidOperationMsg(pMsg, msgx);
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
- if (!validateOneColumn(pCmd, p)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ int32_t ret = validateOneColumn(pCmd, p);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
}
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
if (tscGetNumOfColumns(pTableMeta) == TSDB_MIN_COLUMNS) { //
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg15);
+ return invalidOperationMsg(pMsg, msg15);
}
size_t size = taosArrayGetSize(pAlterSQL->varList);
if (size > 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg16);
+ return invalidOperationMsg(pMsg, msg16);
}
tVariantListItem* pItem = taosArrayGet(pAlterSQL->varList, 0);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
- if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
}
if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg18);
+ return invalidOperationMsg(pMsg, msg18);
}
char name1[TSDB_COL_NAME_LEN] = {0};
tstrncpy(name1, pItem->pVar.pz, sizeof(name1));
TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
- }
-
- return TSDB_CODE_SUCCESS;
-}
+ } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
+ if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+ return invalidOperationMsg(pMsg, msg16);
+ }
-int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
- const char* msg0 = "sample interval can not be less than 10ms.";
- const char* msg1 = "functions not allowed in select clause";
- if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
- pQueryInfo->interval.intervalUnit != 'n' &&
- pQueryInfo->interval.intervalUnit != 'y') {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
- }
-
- size_t size = taosArrayGetSize(pQueryInfo->exprList);
- for (int32_t i = 0; i < size; ++i) {
- int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId;
- if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+ if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+ return invalidOperationMsg(pMsg, msg21);
+ }
+
+ SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+ SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
+ }
+
+ SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+ if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+ return invalidOperationMsg(pMsg, msg21);
+ }
+
+ if (pItem->type != pColSchema->type) {
+ return invalidOperationMsg(pMsg, msg23);
+ }
+
+ if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+ (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+ return invalidOperationMsg(pMsg, msg24);
+ }
+
+ if (pItem->bytes <= pColSchema->bytes) {
+ return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, pMsg, pItem->name, NULL);
+ }
+
+ SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
+ int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns;
+ int16_t i;
+ uint32_t nLen = 0;
+ for (i = 0; i < numOfColumns; ++i) {
+ nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes;
+ }
+ if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
+ return invalidOperationMsg(pMsg, msg24);
+ }
+ TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+ tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+ }else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
+ if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) {
+ return invalidOperationMsg(pMsg, msg16);
+ }
+
+ TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0);
+ if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) {
+ return invalidOperationMsg(pMsg, msg21);
+ }
+
+ SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
+ SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+ if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsg, msg17);
+ }
+
+ SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
+
+ if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
+ return invalidOperationMsg(pMsg, msg10);
+ }
+
+ if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) {
+ return invalidOperationMsg(pMsg, msg21);
+ }
+
+ if (pItem->type != pColSchema->type) {
+ return invalidOperationMsg(pMsg, msg23);
+ }
+
+ if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) ||
+ (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) {
+ return invalidOperationMsg(pMsg, msg24);
+ }
+
+ if (pItem->bytes <= pColSchema->bytes) {
+ return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_TAG_LENGTH, pMsg, pItem->name, NULL);
+ }
+
+ SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+ int16_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
+ int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
+ int32_t tagIndex = columnIndex.columnIndex - numOfCols;
+ assert(tagIndex>=0);
+ uint32_t nLen = 0;
+ for (int i = 0; i < numOfTags; ++i) {
+ nLen += (i != tagIndex) ? pSchema[i].bytes : pItem->bytes;
+ }
+ if (nLen >= TSDB_MAX_TAGS_LEN) {
+ return invalidOperationMsg(pMsg, msg24);
+ }
+
+ TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
+ tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
+ const char* msg0 = "sample interval can not be less than 10ms.";
+ const char* msg1 = "functions not allowed in select clause";
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ if (pQueryInfo->interval.interval != 0 &&
+ convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MILLI)< 10 &&
+ pQueryInfo->interval.intervalUnit != 'n' &&
+ pQueryInfo->interval.intervalUnit != 'y') {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ }
+
+ size_t size = taosArrayGetSize(pQueryInfo->exprList);
+ for (int32_t i = 0; i < size; ++i) {
+ int32_t functId = tscExprGet(pQueryInfo, i)->base.functionId;
+ if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -5334,20 +6196,30 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool isProjectionFunction = false;
- const char* msg1 = "column projection is not compatible with interval";
+ const char* msg1 = "functions not compatible with interval";
// multi-output set/ todo refactor
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t k = 0; k < size; ++k) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, k);
+
+ if (pExpr->base.functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * pExpr->base.functionId - 1);
+ if (pUdfInfo->funcType == TSDB_UDF_TYPE_SCALAR) {
+ isProjectionFunction = true;
+ break;
+ } else {
+ continue;
+ }
+ }
// projection query on primary timestamp, the selectivity function needs to be present.
- if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
bool hasSelectivity = false;
for (int32_t j = 0; j < size; ++j) {
- SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j);
- if ((aAggs[pEx->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) {
+ SExprInfo* pEx = tscExprGet(pQueryInfo, j);
+ if ((aAggs[pEx->base.functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) {
hasSelectivity = true;
break;
}
@@ -5358,17 +6230,18 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
}
- if ((pExpr->functionId == TSDB_FUNC_PRJ && pExpr->numOfParams == 0) || pExpr->functionId == TSDB_FUNC_DIFF ||
- pExpr->functionId == TSDB_FUNC_ARITHM) {
+ int32_t f = pExpr->base.functionId;
+ if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE) {
isProjectionFunction = true;
+ break;
}
}
if (isProjectionFunction) {
- invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- return isProjectionFunction == true ? TSDB_CODE_TSC_INVALID_SQL : TSDB_CODE_SUCCESS;
+ return isProjectionFunction == true ? TSDB_CODE_TSC_INVALID_OPERATION : TSDB_CODE_SUCCESS;
}
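
/*
 * Illustrative consequences of the widened check above (not from the patch):
 * plain projections, diff(), derivative(), arithmetic expressions, and scalar
 * UDFs are all rejected inside an interval query, e.g.
 *   SELECT c1 FROM t INTERVAL(10s);        -- rejected with msg1
 *   SELECT diff(c1) FROM t INTERVAL(10s);  -- rejected with msg1
 *   SELECT count(*) FROM t INTERVAL(10s);  -- ok
 */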
typedef struct SDNodeDynConfOption {
@@ -5385,12 +6258,12 @@ int32_t validateEp(char* ep) {
if (NULL == pos) {
int32_t val = strtol(ep, NULL, 10);
if (val <= 0 || val > 65536) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
uint16_t port = atoi(pos + 1);
if (0 == port) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
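
/*
 * Hypothetical usage sketch for validateEp() (not from the patch): a bare
 * port such as "6030" and an "fqdn:port" pair such as "node1:6030" pass,
 * while "node1:0" or "0" are rejected.
 */
static bool epIsUsable(char* ep) {
  return validateEp(ep) == TSDB_CODE_SUCCESS;
}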
@@ -5401,7 +6274,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 2 || numOfToken > 3) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
const int tokenLogEnd = 2;
@@ -5436,7 +6309,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
strdequote(pValToken->z);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
if (!parseOk) {
- return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
+ return TSDB_CODE_TSC_INVALID_OPERATION; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
@@ -5444,7 +6317,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
- return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
+ return TSDB_CODE_TSC_INVALID_OPERATION; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
@@ -5453,7 +6326,7 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 0 || val > 256) {
/* options value is out of valid range */
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
@@ -5466,13 +6339,13 @@ int32_t validateDNodeConfig(SMiscInfo* pOptions) {
}
}
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t validateLocalConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 1 || numOfToken > 2) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12},
@@ -5495,7 +6368,7 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
int32_t val = strtol(pValToken->z, NULL, 10);
if (!validateDebugFlag(val)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
for (int32_t i = 1; i < tListLen(LOCAL_DYNAMIC_CFG_OPTIONS); ++i) {
@@ -5506,20 +6379,20 @@ int32_t validateLocalConfig(SMiscInfo* pOptions) {
}
}
}
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t validateColumnName(char* name) {
bool ret = taosIsKeyWordToken(name, (int32_t)strlen(name));
if (ret) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
SStrToken token = {.z = name};
token.n = tGetToken(name, &token.type);
if (token.type != TK_STRING && token.type != TK_ID) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (token.type == TK_STRING) {
@@ -5529,13 +6402,13 @@ int32_t validateColumnName(char* name) {
int32_t k = tGetToken(token.z, &token.type);
if (k != token.n) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return validateColumnName(token.z);
} else {
if (isNumber(&token)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
@@ -5547,10 +6420,14 @@ bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}
- return (pQueryInfo->window.skey == pQueryInfo->window.ekey) && (pQueryInfo->window.skey != 0);
+ if (pQueryInfo->window.skey == INT64_MIN || pQueryInfo->window.ekey == INT64_MAX) {
+ return false;
+ }
+
+ return !(pQueryInfo->window.skey != pQueryInfo->window.ekey && pQueryInfo->interval.interval == 0);
}
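
/*
 * Editorial reading of the predicate above: a point-interp query must carry a
 * fully bounded time window, and a range (skey != ekey) is only acceptable
 * when an interval is present, e.g. (illustrative SQL):
 *   SELECT interp(c1) FROM t WHERE ts = '2020-01-01 00:00:00';   -- ok
 *   SELECT interp(c1) FROM t;                                    -- rejected, unbounded
 */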
-int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySqlNode* pQuerySqlNode, SSqlObj* pSql) {
+int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSqlObj* pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
const char* msg0 = "soffset/offset can not be less than 0";
@@ -5558,15 +6435,15 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
const char* msg2 = "slimit/soffset can not apply to projection query";
// handle the limit offset value, validate the limit
- pQueryInfo->limit = pQuerySqlNode->limit;
+ pQueryInfo->limit = pSqlNode->limit;
pQueryInfo->clauseLimit = pQueryInfo->limit.limit;
- pQueryInfo->slimit = pQuerySqlNode->slimit;
+ pQueryInfo->slimit = pSqlNode->slimit;
tscDebug("0x%"PRIx64" limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql->self,
pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset);
if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->limit.limit == 0) {
@@ -5580,7 +6457,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query
if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// for projection query on super table, all queries are subqueries
@@ -5603,10 +6480,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
* And then launching multiple async-queries against all qualified virtual nodes, during the first-stage
* query operation.
*/
- int32_t code = tscGetSTableVgroupInfo(pSql, clauseIndex);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+// assert(allVgroupInfoRetrieved(pQueryInfo));
// No tables included. No results generated. Query results are empty.
if (pTableMetaInfo->vgroupList->numOfVgroups == 0) {
@@ -5622,8 +6496,8 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
/*
- * the offset value should be removed during retrieve data from virtual node, since the
- * global order are done in client side, so the offset is applied at the client side
+         * The offset value should be dropped while retrieving data from the virtual nodes, since
+         * global ordering is done at the client side, where the offset is then applied.
         * However, note that the maximum allowed number of results for each table should be less
         * than or equal to the value of limit.
*/
@@ -5636,7 +6510,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
}
} else {
if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -5644,48 +6518,40 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
}
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
- const char* msg = "invalid number of options";
+ const char* msg1 = "invalid number of keep options";
+ const char* msg2 = "invalid keep value";
+ const char* msg3 = "invalid keep value, should be keep0 <= keep1 <= keep2";
- pMsg->daysToKeep = htonl(-1);
+ pMsg->daysToKeep0 = htonl(-1);
pMsg->daysToKeep1 = htonl(-1);
pMsg->daysToKeep2 = htonl(-1);
SArray* pKeep = pCreateDb->keep;
if (pKeep != NULL) {
size_t s = taosArrayGetSize(pKeep);
- tVariantListItem* p0 = taosArrayGet(pKeep, 0);
- switch (s) {
- case 1: {
- if ((int32_t)p0->pVar.i64 <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
- }
- pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
- }
- break;
- case 2: {
- tVariantListItem* p1 = taosArrayGet(pKeep, 1);
- if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
- }
- pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
- pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
- break;
- }
- case 3: {
- tVariantListItem* p1 = taosArrayGet(pKeep, 1);
- tVariantListItem* p2 = taosArrayGet(pKeep, 2);
+#ifdef _STORAGE
+ if (s >= 4 ||s <= 0) {
+#else
+ if (s != 1) {
+#endif
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
- if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
- }
+ tVariantListItem* p0 = taosArrayGet(pKeep, 0);
+ tVariantListItem* p1 = (s > 1) ? taosArrayGet(pKeep, 1) : p0;
+ tVariantListItem* p2 = (s > 2) ? taosArrayGet(pKeep, 2) : p1;
- pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
- pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
- pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
- break;
- }
- default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); }
+ if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+ if (!(((int32_t)p0->pVar.i64 <= (int32_t)p1->pVar.i64) && ((int32_t)p1->pVar.i64 <= (int32_t)p2->pVar.i64))) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
+
+ pMsg->daysToKeep0 = htonl((int32_t)p0->pVar.i64);
+ pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
+ pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
+
}
return TSDB_CODE_SUCCESS;
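
/*
 * Editorial sketch of the invariant enforced above (helper name is
 * hypothetical): under _STORAGE up to three keep values are accepted and a
 * shorter list is padded with its last entry, so "KEEP 10" behaves like
 * "KEEP 10,10,10"; all values must be positive and non-decreasing.
 */
static bool keepListValid(int32_t k0, int32_t k1, int32_t k2) {
  return k0 > 0 && k0 <= k1 && k1 <= k2;  // positivity of k1/k2 follows
}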
@@ -5707,11 +6573,15 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_MICRO_STR, pToken->n) == 0 &&
strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) {
pMsg->precision = TSDB_TIME_PRECISION_MICRO;
+ } else if (strncmp(pToken->z, TSDB_TIME_PRECISION_NANO_STR, pToken->n) == 0 &&
+ strlen(TSDB_TIME_PRECISION_NANO_STR) == pToken->n) {
+ pMsg->precision = TSDB_TIME_PRECISION_NANO;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
+
return TSDB_CODE_SUCCESS;
}
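
/*
 * The accepted precision literals after this change (illustrative DDL):
 *   CREATE DATABASE d PRECISION 'ms';  -- millisecond, the default
 *   CREATE DATABASE d PRECISION 'us';  -- microsecond
 *   CREATE DATABASE d PRECISION 'ns';  -- nanosecond, newly added here
 */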
@@ -5740,47 +6610,48 @@ int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql) {
setCreateDBOption(pMsg, pCreateDbSql);
if (setKeepOption(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (setTimePrecision(pCmd, pMsg, pCreateDbSql) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (tscCheckCreateDbParams(pCmd, pMsg) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
}
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex) {
- SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentObj->cmd, subClauseIndex);
+ SQueryInfo* pParentQueryInfo = tscGetQueryInfo(&pParentObj->cmd);
if (pParentQueryInfo->groupbyExpr.numOfGroupCols > 0) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex);
- SSqlExpr* pExpr = NULL;
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ SExprInfo* pExpr = NULL;
size_t size = taosArrayGetSize(pQueryInfo->exprList);
if (size > 0) {
- pExpr = tscSqlExprGet(pQueryInfo, (int32_t)size - 1);
+ pExpr = tscExprGet(pQueryInfo, (int32_t)size - 1);
}
- if (pExpr == NULL || pExpr->functionId != TSDB_FUNC_TAG) {
+ if (pExpr == NULL || pExpr->base.functionId != TSDB_FUNC_TAG) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pParentQueryInfo, tableIndex);
- int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
+ uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
+ int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, uid);
SSchema* pTagSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, colId);
int16_t colIndex = tscGetTagColIndexById(pTableMetaInfo->pTableMeta, colId);
- SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex};
+ SColumnIndex index = {.tableIndex = 0, .columnIndex = colIndex};
char* name = pTagSchema->name;
int16_t type = pTagSchema->type;
int16_t bytes = pTagSchema->bytes;
- pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
- pExpr->colInfo.flag = TSDB_COL_TAG;
+ pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(&pSql->cmd), bytes, true);
+ pExpr->base.colInfo.flag = TSDB_COL_TAG;
// NOTE: tag column does not add to source column list
SColumnList ids = {0};
@@ -5788,38 +6659,40 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau
int32_t relIndex = index.columnIndex;
- pExpr->colInfo.colIndex = relIndex;
+ pExpr->base.colInfo.colIndex = relIndex;
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
pColIndex->colIndex = relIndex;
- index = (SColumnIndex) {.tableIndex = tableIndex, .columnIndex = relIndex};
- tscColumnListInsert(pTableMetaInfo->tagColList, &index);
+ tscColumnListInsert(pTableMetaInfo->tagColList, relIndex, uid, pTagSchema);
}
}
}
// limit the output to be 1 for each state value
-static void doLimitOutputNormalColOfGroupby(SSqlExpr* pExpr) {
+static void doLimitOutputNormalColOfGroupby(SExprInfo* pExpr) {
int32_t outputRow = 1;
- tVariantCreateFromBinary(&pExpr->param[0], (char*)&outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT);
- pExpr->numOfParams = 1;
+ tVariantCreateFromBinary(&pExpr->base.param[0], (char*)&outputRow, sizeof(int32_t), TSDB_DATA_TYPE_INT);
+ pExpr->base.numOfParams = 1;
}
-void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) {
+void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlCmd* pCmd) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, tagIndex);
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->colIndex);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pColIndex->colIndex};
- tscAddFuncInSelectClause(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL);
+ SExprInfo* pExprInfo = tscAddFuncInSelectClause(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema,
+ TSDB_COL_NORMAL, getNewResColId(pCmd));
+
+ strncpy(pExprInfo->base.token, pExprInfo->base.colInfo.name, tListLen(pExprInfo->base.token));
int32_t numOfFields = tscNumOfFields(pQueryInfo);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfFields - 1);
- doLimitOutputNormalColOfGroupby(pInfo->pSqlExpr);
+ doLimitOutputNormalColOfGroupby(pInfo->pExpr);
pInfo->visible = false;
}
@@ -5832,25 +6705,29 @@ static void doUpdateSqlFunctionForTagPrj(SQueryInfo* pQueryInfo) {
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_TAGPRJ || pExpr->functionId == TSDB_FUNC_TAG) {
- pExpr->functionId = TSDB_FUNC_TAG_DUMMY;
- tagLength += pExpr->resBytes;
- } else if (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- pExpr->functionId = TSDB_FUNC_TS_DUMMY;
- tagLength += pExpr->resBytes;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ || pExpr->base.functionId == TSDB_FUNC_TAG) {
+ pExpr->base.functionId = TSDB_FUNC_TAG_DUMMY;
+ tagLength += pExpr->base.resBytes;
+ } else if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ pExpr->base.functionId = TSDB_FUNC_TS_DUMMY;
+ tagLength += pExpr->base.resBytes;
}
}
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if ((pExpr->functionId != TSDB_FUNC_TAG_DUMMY && pExpr->functionId != TSDB_FUNC_TS_DUMMY) &&
- !(pExpr->functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->colInfo.flag))) {
- SSchema* pColSchema = &pSchema[pExpr->colInfo.colIndex];
- getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->functionId, (int32_t)pExpr->param[0].i64, &pExpr->resType,
- &pExpr->resBytes, &pExpr->interBytes, tagLength, isSTable);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId < 0) {
+ continue;
+ }
+
+ if ((pExpr->base.functionId != TSDB_FUNC_TAG_DUMMY && pExpr->base.functionId != TSDB_FUNC_TS_DUMMY) &&
+ !(pExpr->base.functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag))) {
+ SSchema* pColSchema = &pSchema[pExpr->base.colInfo.colIndex];
+ getResultDataInfo(pColSchema->type, pColSchema->bytes, pExpr->base.functionId, (int32_t)pExpr->base.param[0].i64, &pExpr->base.resType,
+ &pExpr->base.resBytes, &pExpr->base.interBytes, tagLength, isSTable, NULL);
}
}
}
@@ -5859,24 +6736,24 @@ static int32_t doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_PRJ && (!TSDB_COL_IS_UD_COL(pExpr->colInfo.flag) && (pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX))) {
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && (!TSDB_COL_IS_UD_COL(pExpr->base.colInfo.flag) && (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX))) {
bool qualifiedCol = false;
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
- if (pExpr->colInfo.colId == pColIndex->colId) {
+ if (pExpr->base.colInfo.colId == pColIndex->colId) {
qualifiedCol = true;
doLimitOutputNormalColOfGroupby(pExpr);
- pExpr->numOfParams = 1;
+ pExpr->base.numOfParams = 1;
break;
}
}
// it is not a tag column/tbname column/user-defined column, return error
if (!qualifiedCol) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
@@ -5884,7 +6761,7 @@ static int32_t doUpdateSqlFunctionForColPrj(SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId) {
+static bool tagColumnInGroupby(SGroupbyExpr* pGroupbyExpr, int16_t columnId) {
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j);
@@ -5902,10 +6779,10 @@ static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) {
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_PRJ) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ) {
hasColumnPrj = true;
- } else if (pExpr->functionId == TSDB_FUNC_TAGPRJ) {
+ } else if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ) {
hasTagPrj = true;
}
}
@@ -5917,14 +6794,14 @@ static bool onlyTagPrjFunction(SQueryInfo* pQueryInfo) {
static bool allTagPrjInGroupby(SQueryInfo* pQueryInfo) {
bool allInGroupby = true;
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId != TSDB_FUNC_TAGPRJ) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId != TSDB_FUNC_TAGPRJ) {
continue;
}
- if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->colInfo.colId)) {
+ if (!tagColumnInGroupby(&pQueryInfo->groupbyExpr, pExpr->base.colInfo.colId)) {
allInGroupby = false;
break;
}
@@ -5938,9 +6815,9 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_TAGPRJ) {
- pExpr->functionId = TSDB_FUNC_TAG;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ) {
+ pExpr->base.functionId = TSDB_FUNC_TAG;
}
}
}
@@ -5951,9 +6828,9 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
 * 2. if a selectivity function and the tagprj function both exist, only one
 * selectivity function is allowed.
*/
-static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
+static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
- const char* msg3 = "aggregation function should not be mixed up with projection";
+ const char* msg2 = "aggregation function should not be mixed up with projection";
bool tagTsColExists = false;
int16_t numOfSelectivity = 0;
@@ -5961,20 +6838,29 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
- if (pExpr->functionId == TSDB_FUNC_TAGPRJ ||
- (pExpr->functionId == TSDB_FUNC_PRJ && pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
+ SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
+ if (pExpr->base.functionId == TSDB_FUNC_TAGPRJ ||
+ (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX)) {
tagTsColExists = true; // selectivity + ts/tag column
break;
}
}
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
+ SExprInfo* pExpr = taosArrayGetP(pQueryInfo->exprList, i);
- int16_t functionId = pExpr->functionId;
+ int16_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
- functionId == TSDB_FUNC_ARITHM) {
+ functionId == TSDB_FUNC_ARITHM || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
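+    // a negative id maps into the UDF list; aggregate UDFs count as aggregation functions here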
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
+ if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
+ ++numOfAggregation;
+ }
+
continue;
}
@@ -5989,7 +6875,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
   // When the tag projection function is applied to a tag column that is not in the group by clause,
   // mixing aggregation and selectivity functions in the select clause is not allowed.
if (numOfAggregation > 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(msg, msg1);
}
/*
@@ -6008,17 +6894,17 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
* Otherwise, return with error code.
*/
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- int16_t functionId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int16_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == 0) {
continue;
}
if ((functionId == TSDB_FUNC_LAST_ROW) ||
- (functionId == TSDB_FUNC_LAST_DST && (pExpr->colInfo.flag & TSDB_COL_NULL) != 0)) {
+ (functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) {
// do nothing
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(msg, msg1);
}
}
@@ -6031,7 +6917,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
} else {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(msg, msg2);
}
if (numOfAggregation > 0 || numOfSelectivity > 0) {
@@ -6049,71 +6935,76 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
}
static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
- const char* msg2 = "interval not allowed in group by normal column";
+ const char* msg1 = "interval not allowed in group by normal column";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- SSchema s = *tGetTbnameColumnSchema();
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
- int16_t bytes = 0;
- int16_t type = 0;
- char* name = NULL;
+
+ SSchema* tagSchema = NULL;
+ if (!UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+ tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
+ }
+
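+  // s points at the schema of the current group by column, starting from an empty placeholder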
+ SSchema tmp = {.type = 0, .name = "", .colId = 0, .bytes = 0};
+ SSchema* s = &tmp;
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, i);
int16_t colIndex = pColIndex->colIndex;
+
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- type = s.type;
- bytes = s.bytes;
- name = s.name;
+ s = tGetTbnameColumnSchema();
} else {
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
- SSchema* tagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
-
- type = tagSchema[colIndex].type;
- bytes = tagSchema[colIndex].bytes;
- name = tagSchema[colIndex].name;
+        if (tagSchema) {
+ s = &tagSchema[colIndex];
+ }
} else {
- type = pSchema[colIndex].type;
- bytes = pSchema[colIndex].bytes;
- name = pSchema[colIndex].name;
+ s = &pSchema[colIndex];
}
}
-
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
-
+
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
+
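+      // for diff/derivative queries, use the tag projection function instead of the plain tag function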
+ int32_t f = TSDB_FUNC_TAG;
+ if (tscIsDiffDerivQuery(pQueryInfo)) {
+ f = TSDB_FUNC_TAGPRJ;
+ }
+
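+      // position the tag expression ahead of any invisible helper fields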
+ int32_t pos = tscGetFirstInvisibleFieldPos(pQueryInfo);
+
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
- SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
-
- memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
- tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
-
- pExpr->colInfo.flag = TSDB_COL_TAG;
+ SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
+
+ memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
+ tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));
+ tstrncpy(pExpr->base.token, s->name, sizeof(pExpr->base.aliasName));
+
+ pExpr->base.colInfo.flag = TSDB_COL_TAG;
     // NOTE: the tag column is not added to the source column list
- SColumnList ids = getColumnList(1, 0, pColIndex->colIndex);
- insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr);
+ SColumnList ids = createColumnList(1, 0, pColIndex->colIndex);
+ insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, s->name, pExpr);
} else {
       // if this query groups by a normal column, a time window query is not allowed
if (isTimeWindowQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
+ size_t size = tscNumOfExprs(pQueryInfo);
+
bool hasGroupColumn = false;
for (int32_t j = 0; j < size; ++j) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, j);
- if (pExpr->colInfo.colId == pColIndex->colId) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, j);
+ if ((pExpr->base.functionId == TSDB_FUNC_PRJ) && pExpr->base.colInfo.colId == pColIndex->colId) {
+ hasGroupColumn = true;
break;
}
}
- /*
- * if the group by column does not required by user, add this column into the final result set
- * but invisible to user
- */
+      // if the group by column is not required by the user, add it as an invisible column to the final result set.
if (!hasGroupColumn) {
- doAddGroupColumnForSubquery(pQueryInfo, i);
+ doAddGroupColumnForSubquery(pQueryInfo, i, pCmd);
}
}
}
@@ -6125,11 +7016,11 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
bool tagProjection = false;
bool tableCounting = false;
- int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfCols = (int32_t) tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- int32_t functionId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int32_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAGPRJ) {
tagProjection = true;
@@ -6137,7 +7028,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
}
if (functionId == TSDB_FUNC_COUNT) {
- assert(pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX);
+ assert(pExpr->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX);
tableCounting = true;
}
}
@@ -6145,110 +7036,159 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
return (tableCounting && tagProjection)? -1:0;
}
-int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
+int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed";
- const char* msg3 = "group by not allowed on projection query";
+ const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
+ const char* msg6 = "TWA/Diff/Derivative/Irate only support group by tbname";
   // when only tags are retrieved, group by is not supported
if (tscQueryTags(pQueryInfo)) {
if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(msg, msg5);
}
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || isTimeWindowQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(msg, msg4);
} else {
return TSDB_CODE_SUCCESS;
}
}
+ if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
+ return invalidOperationMsg(msg, msg3);
+ }
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
     // check if all the tag projection columns belong to the group by columns
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
+      // this is a group-by aggregate query; the tag projection function is not suitable in this case.
updateTagPrjFunction(pQueryInfo);
+
return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
}
     // check all query functions in the selection clause; multi-output functions are not allowed
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- int32_t functId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int32_t f = pExpr->base.functionId;
/*
* group by normal columns.
* Check if the column projection is identical to the group by column or not
*/
- if (functId == TSDB_FUNC_PRJ && pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (f == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
bool qualified = false;
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
- if (pColIndex->colId == pExpr->colInfo.colId) {
+ if (pColIndex->colId == pExpr->base.colInfo.colId) {
qualified = true;
break;
}
}
if (!qualified) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(msg, msg2);
+ }
+ }
+
+ if (f < 0) {
+ continue;
+ }
+
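+    // TWA/diff/derivative/irate require the first group by column to be tbname; any further ones must be tags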
+ if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE)) {
+ for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
+ SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
+ if (j == 0) {
+ if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
+ return invalidOperationMsg(msg, msg6);
+ }
+ } else if (!TSDB_COL_IS_TAG(pColIndex->flag)) {
+ return invalidOperationMsg(msg, msg6);
+ }
}
}
- if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
- functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM && f != TSDB_FUNC_DIFF &&
+ f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
+ return invalidOperationMsg(msg, msg1);
}
- if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ return invalidOperationMsg(msg, msg1);
}
}
- if (checkUpdateTagPrjFunctions(pQueryInfo, pCmd) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- /*
- * group by tag function must be not changed the function name, otherwise, the group operation may fail to
- * divide the subset of final result.
- */
if (doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
   // projection query on a super table is not compatible with the "group by" syntax
- if (tscIsProjectionQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (tscIsProjectionQuery(pQueryInfo) && !(tscIsDiffDerivQuery(pQueryInfo))) {
+ return invalidOperationMsg(msg, msg3);
}
return TSDB_CODE_SUCCESS;
} else {
- return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
+ return checkUpdateTagPrjFunctions(pQueryInfo, msg);
+ }
+}
+
+
+int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) {
+ const char* msg1 = "TWA/Diff/Derivative/Irate are not allowed to apply to super table without group by tbname";
+
+ int32_t numOfExprs = (int32_t)tscNumOfExprs(pQueryInfo);
+ size_t upNum = taosArrayGetSize(pQueryInfo->pUpstream);
+
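+  // the per-row functions below are accepted if an upstream source is a plain table or is grouped by tbname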
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+
+ int32_t f = pExpr->base.functionId;
+ if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF) {
+ for (int32_t j = 0; j < upNum; ++j) {
+ SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j);
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pUp, 0);
+ bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
+ if ((!isSTable) || groupbyTbname(pUp)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ return invalidOperationMsg(msg, msg1);
+ }
}
+
+ return TSDB_CODE_SUCCESS;
}
-int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode* pQuerySqlNode) {
+
+
+int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
const char* msg3 = "invalid function";
- SArray* pExprList = pQuerySqlNode->pSelectList;
+ SArray* pExprList = pSqlNode->pSelNodeList;
size_t size = taosArrayGetSize(pExprList);
if (size != 1) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
bool server_status = false;
tSqlExprItem* pExprItem = taosArrayGet(pExprList, 0);
tSqlExpr* pExpr = pExprItem->pNode;
- if (pExpr->operand.z == NULL) {
+ if (pExpr->Expr.operand.z == NULL) {
//handle 'select 1'
- if (pExpr->token.n == 1 && 0 == strncasecmp(pExpr->token.z, "1", 1)) {
+ if (pExpr->exprToken.n == 1 && 0 == strncasecmp(pExpr->exprToken.z, "1", 1)) {
server_status = true;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// TODO redefine the function
@@ -6263,8 +7203,8 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode
index = 2;
} else {
for (int32_t i = 0; i < tListLen(functionsInfo); ++i) {
- if (strncasecmp(functionsInfo[i].name, pExpr->token.z, functionsInfo[i].len) == 0 &&
- functionsInfo[i].len == pExpr->token.n) {
+ if (strncasecmp(functionsInfo[i].name, pExpr->exprToken.z, functionsInfo[i].len) == 0 &&
+ functionsInfo[i].len == pExpr->exprToken.n) {
index = i;
break;
}
@@ -6276,22 +7216,22 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySqlNode
pQueryInfo->command = TSDB_SQL_CURRENT_DB;break;
case 1:
pQueryInfo->command = TSDB_SQL_SERV_VERSION;break;
- case 2:
+ case 2:
pQueryInfo->command = TSDB_SQL_SERV_STATUS;break;
case 3:
pQueryInfo->command = TSDB_SQL_CLI_VERSION;break;
case 4:
pQueryInfo->command = TSDB_SQL_CURRENT_USER;break;
- default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); }
+ default: { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); }
}
SColumnIndex ind = {0};
- SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
- tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pQueryInfo), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false);
+ SExprInfo* pExpr1 = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
+ tDataTypes[TSDB_DATA_TYPE_INT].bytes, getNewResColId(pCmd), tDataTypes[TSDB_DATA_TYPE_INT].bytes, false);
tSqlExprItem* item = taosArrayGet(pExprList, 0);
const char* name = (item->aliasName != NULL)? item->aliasName:functionsInfo[index].name;
- tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
+ tstrncpy(pExpr1->base.aliasName, name, tListLen(pExpr1->base.aliasName));
return TSDB_CODE_SUCCESS;
}
@@ -6302,69 +7242,77 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
if (pCreate->walLevel != -1 && (pCreate->walLevel < TSDB_MIN_WAL_LEVEL || pCreate->walLevel > TSDB_MAX_WAL_LEVEL)) {
snprintf(msg, tListLen(msg), "invalid db option walLevel: %d, only 1-2 allowed", pCreate->walLevel);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (pCreate->replications != -1 &&
(pCreate->replications < TSDB_MIN_DB_REPLICA_OPTION || pCreate->replications > TSDB_MAX_DB_REPLICA_OPTION)) {
snprintf(msg, tListLen(msg), "invalid db option replications: %d valid range: [%d, %d]", pCreate->replications,
TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
+ }
+
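+  // totalBlocks is transmitted in network byte order; convert it before range checking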
+ int32_t blocks = ntohl(pCreate->totalBlocks);
+ if (blocks != -1 && (blocks < TSDB_MIN_TOTAL_BLOCKS || blocks > TSDB_MAX_TOTAL_BLOCKS)) {
+ snprintf(msg, tListLen(msg), "invalid db option totalBlocks: %d valid range: [%d, %d]", blocks,
+ TSDB_MIN_TOTAL_BLOCKS, TSDB_MAX_TOTAL_BLOCKS);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (pCreate->quorum != -1 &&
(pCreate->quorum < TSDB_MIN_DB_QUORUM_OPTION || pCreate->quorum > TSDB_MAX_DB_QUORUM_OPTION)) {
snprintf(msg, tListLen(msg), "invalid db option quorum: %d valid range: [%d, %d]", pCreate->quorum,
TSDB_MIN_DB_QUORUM_OPTION, TSDB_MAX_DB_QUORUM_OPTION);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t val = htonl(pCreate->daysPerFile);
if (val != -1 && (val < TSDB_MIN_DAYS_PER_FILE || val > TSDB_MAX_DAYS_PER_FILE)) {
snprintf(msg, tListLen(msg), "invalid db option daysPerFile: %d valid range: [%d, %d]", val,
TSDB_MIN_DAYS_PER_FILE, TSDB_MAX_DAYS_PER_FILE);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
val = htonl(pCreate->cacheBlockSize);
if (val != -1 && (val < TSDB_MIN_CACHE_BLOCK_SIZE || val > TSDB_MAX_CACHE_BLOCK_SIZE)) {
snprintf(msg, tListLen(msg), "invalid db option cacheBlockSize: %d valid range: [%d, %d]", val,
TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
val = htonl(pCreate->maxTables);
if (val != -1 && (val < TSDB_MIN_TABLES || val > TSDB_MAX_TABLES)) {
snprintf(msg, tListLen(msg), "invalid db option maxSessions: %d valid range: [%d, %d]", val,
TSDB_MIN_TABLES, TSDB_MAX_TABLES);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) {
- snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision,
- TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO &&
+ pCreate->precision != TSDB_TIME_PRECISION_NANO) {
+ snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d, %d]", pCreate->precision,
+ TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
val = htonl(pCreate->commitTime);
if (val != -1 && (val < TSDB_MIN_COMMIT_TIME || val > TSDB_MAX_COMMIT_TIME)) {
snprintf(msg, tListLen(msg), "invalid db option commitTime: %d valid range: [%d, %d]", val,
TSDB_MIN_COMMIT_TIME, TSDB_MAX_COMMIT_TIME);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
val = htonl(pCreate->fsyncPeriod);
if (val != -1 && (val < TSDB_MIN_FSYNC_PERIOD || val > TSDB_MAX_FSYNC_PERIOD)) {
snprintf(msg, tListLen(msg), "invalid db option fsyncPeriod: %d valid range: [%d, %d]", val,
TSDB_MIN_FSYNC_PERIOD, TSDB_MAX_FSYNC_PERIOD);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (pCreate->compression != -1 &&
(pCreate->compression < TSDB_MIN_COMP_LEVEL || pCreate->compression > TSDB_MAX_COMP_LEVEL)) {
snprintf(msg, tListLen(msg), "invalid db option compression: %d valid range: [%d, %d]", pCreate->compression,
TSDB_MIN_COMP_LEVEL, TSDB_MAX_COMP_LEVEL);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
val = (int16_t)htons(pCreate->partitions);
@@ -6372,7 +7320,7 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
(val < TSDB_MIN_DB_PARTITON_OPTION || val > TSDB_MAX_DB_PARTITON_OPTION)) {
snprintf(msg, tListLen(msg), "invalid topic option partition: %d valid range: [%d, %d]", val,
TSDB_MIN_DB_PARTITON_OPTION, TSDB_MAX_DB_PARTITON_OPTION);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
@@ -6380,10 +7328,10 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
}
// for debugging purposes
-void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, subClauseIndex);
+void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex) {
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
- int32_t size = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t size = (int32_t)tscNumOfExprs(pQueryInfo);
if (size == 0) {
return;
}
@@ -6395,12 +7343,21 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
offset += sprintf(str, "num:%d [", size);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
char tmpBuf[1024] = {0};
int32_t tmpLen = 0;
+ char *name = NULL;
+
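+    // resolve the display name: negative ids index into the UDF list, non-negative ids into the built-in aggregates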
+ if (pExpr->base.functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * pExpr->base.functionId - 1);
+ name = pUdfInfo->name;
+ } else {
+ name = aAggs[pExpr->base.functionId].name;
+ }
+
tmpLen =
- sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].name, pExpr->uid, pExpr->colInfo.colId);
+ sprintf(tmpBuf, "%s(uid:%" PRIu64 ", %d)", name, pExpr->base.uid, pExpr->base.colInfo.colId);
if (tmpLen + offset >= totalBufSize - 1) break;
@@ -6422,7 +7379,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
const char* msg1 = "invalid table name";
SSqlCmd* pCmd = &pSql->cmd;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo;
@@ -6436,17 +7393,17 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
SStrToken* pzTableName = &(pCreateTable->name);
if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- int32_t code = tscSetTableFullName(pTableMetaInfo, pzTableName, pSql);
+ int32_t code = tscSetTableFullName(&pTableMetaInfo->name, pzTableName, pSql);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
if (!validateTableColumnInfo(pFieldList, pCmd) ||
(pTagList != NULL && !validateTagParams(pTagList, pFieldList, pCmd))) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t col = 0;
@@ -6481,7 +7438,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
SSqlCmd* pCmd = &pSql->cmd;
SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
   // two tables: the first is for the current table, and the second is for the super table.
if (pQueryInfo->numOfTables < 2) {
@@ -6500,10 +7457,10 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
SStrToken* pToken = &pCreateTableInfo->stableName;
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- int32_t code = tscSetTableFullName(pStableMetaInfo, pToken, pSql);
+ int32_t code = tscSetTableFullName(&pStableMetaInfo->name, pToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -6521,6 +7478,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
   // overly long tag values return an invalid sql error; they are not truncated automatically
SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta);
+ STableComInfo tinfo = tscGetTableInfo(pStableMetaInfo->pTableMeta);
STagData *pTag = &pCreateTableInfo->tagdata;
SKVRowBuilder kvRowBuilder = {0};
@@ -6539,12 +7497,12 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (valSize != nameSize) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (schemaSize < valSize) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
bool findColumnIndex = false;
@@ -6564,11 +7522,20 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (strncmp(sToken->z, pTagSchema[t].name, sToken->n) == 0 && strlen(pTagSchema[t].name) == sToken->n) {
SSchema* pSchema = &pTagSchema[t];
- char tagVal[TSDB_MAX_TAGS_LEN];
+ char tagVal[TSDB_MAX_TAGS_LEN] = {0};
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
if (pItem->pVar.nLen > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ } else if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
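+          // timestamp tags accept a string literal (parsed at the table's precision) or a numeric value (converted from nanoseconds)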
+ if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
+ ret = convertTimestampStrToInt64(&(pItem->pVar), tinfo.precision);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+ } else if (pItem->pVar.nType == TSDB_DATA_TYPE_TIMESTAMP) {
+ pItem->pVar.i64 = convertTimePrecision(pItem->pVar.i64, TSDB_TIME_PRECISION_NANO, tinfo.precision);
}
}
@@ -6579,13 +7546,13 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
int16_t len = varDataTLen(tagVal);
if (len > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
if (ret != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
@@ -6597,13 +7564,13 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (!findColumnIndex) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken->z);
+ return tscInvalidOperationMsg(pCmd->payload, "invalid tag name", sToken->z);
}
}
} else {
if (schemaSize != valSize) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
for (int32_t i = 0; i < valSize; ++i) {
@@ -6614,10 +7581,20 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
if (pItem->pVar.nLen > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ } else if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
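+          // same as above: convert string or numeric timestamp tag values to the table's precision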
+ if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
+ ret = convertTimestampStrToInt64(&(pItem->pVar), tinfo.precision);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+ } else if (pItem->pVar.nType == TSDB_DATA_TYPE_TIMESTAMP) {
+ pItem->pVar.i64 = convertTimePrecision(pItem->pVar.i64, TSDB_TIME_PRECISION_NANO, tinfo.precision);
}
}
+
ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true);
// check again after the convert since it may be converted from binary to nchar.
@@ -6625,13 +7602,13 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
int16_t len = varDataTLen(tagVal);
if (len > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
if (ret != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
@@ -6655,11 +7632,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
// table name
if (tscValidateName(&(pCreateTableInfo->name)) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
- ret = tscSetTableFullName(pTableMetaInfo, &pCreateTableInfo->name, pSql);
+ ret = tscSetTableFullName(&pTableMetaInfo->name, &pCreateTableInfo->name, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -6667,7 +7644,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
pCreateTableInfo->fullname = calloc(1, tNameLen(&pTableMetaInfo->name) + 1);
ret = tNameExtractFullName(&pTableMetaInfo->name, pCreateTableInfo->fullname);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -6683,9 +7660,9 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
const char* msg6 = "from missing in subclause";
const char* msg7 = "time interval is required";
const char* msg8 = "the first column should be primary timestamp column";
-
+
SSqlCmd* pCmd = &pSql->cmd;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
assert(pQueryInfo->numOfTables == 1);
SCreateTableSql* pCreateTable = pInfo->pCreateTableInfo;
@@ -6693,24 +7670,24 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
   // if the sql specifies a db, use it; otherwise use the default db
SStrToken* pName = &(pCreateTable->name);
- SQuerySqlNode* pQuerySqlNode = pCreateTable->pSelect;
+ SSqlNode* pSqlNode = pCreateTable->pSelect;
if (tscValidateName(pName) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- SFromInfo* pFromInfo = pInfo->pCreateTableInfo->pSelect->from;
- if (pFromInfo == NULL || taosArrayGetSize(pFromInfo->tableList) == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ SRelationInfo* pFromInfo = pInfo->pCreateTableInfo->pSelect->from;
+ if (pFromInfo == NULL || taosArrayGetSize(pFromInfo->list) == 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- STableNamePair* p1 = taosArrayGet(pFromInfo->tableList, 0);
- SStrToken srcToken = {.z = p1->name.z, .n = p1->name.n, .type = TK_STRING};
+ SRelElementPair* p1 = taosArrayGet(pFromInfo->list, 0);
+ SStrToken srcToken = {.z = p1->tableName.z, .n = p1->tableName.n, .type = TK_STRING};
if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- int32_t code = tscSetTableFullName(pTableMetaInfo, &srcToken, pSql);
+ int32_t code = tscSetTableFullName(&pTableMetaInfo->name, &srcToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -6720,82 +7697,71 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return code;
}
- bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
- if (parseSelectClause(&pSql->cmd, 0, pQuerySqlNode->pSelectList, isSTable, false, false) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (validateSelectNodeList(&pSql->cmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, false) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if (pQuerySqlNode->pWhere != NULL) { // query condition in stream computing
- if (parseWhereClause(pQueryInfo, &pQuerySqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (pSqlNode->pWhere != NULL) { // query condition in stream computing
+ if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
// set interval value
- if (parseIntervalClause(pSql, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
   // in a projection query, the primary column must be of timestamp type
if (tscIsProjectionQuery(pQueryInfo)) {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
- // check zero
- if(size == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
- }
-
- // check primary column is timestamp
- SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, 0);
- if(pSqlExpr == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
- }
- if( pSqlExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ if (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
} else {
if (pQueryInfo->interval.interval == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
- }
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
}
// set the created table[stream] name
- code = tscSetTableFullName(pTableMetaInfo, pName, pSql);
+ code = tscSetTableFullName(&pTableMetaInfo->name, pName, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- if (pQuerySqlNode->sqlstr.n > TSDB_MAX_SAVED_SQL_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ if (pSqlNode->sqlstr.n > TSDB_MAX_SAVED_SQL_LEN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (tsRewriteFieldNameIfNecessary(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
if (validateSqlFunctionInStreamSql(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
/*
    * check whether the fill operation is applicable; the fill operation itself is parsed and executed
    * during query execution, not here.
*/
- if (pQuerySqlNode->fillType != NULL) {
+ if (pSqlNode->fillType != NULL) {
if (pQueryInfo->interval.interval == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- tVariantListItem* pItem = taosArrayGet(pQuerySqlNode->fillType, 0);
+ tVariantListItem* pItem = taosArrayGet(pSqlNode->fillType, 0);
if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) ||
(strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
}
@@ -6805,7 +7771,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
-static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
+int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg3 = "start(end) time of query range required or time range too large";
if (pQueryInfo->interval.interval == 0) {
@@ -6814,7 +7780,7 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
if (initialWindows) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
@@ -6834,176 +7800,171 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
   // the number of results must not exceed 10,000,000
if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
return TSDB_CODE_SUCCESS;
}
+// TODO normalize the function expression and compare it
+int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pSqlExpr, SExprInfo** pExpr) {
+ const char* msg1 = "invalid sql expression in having";
- int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) {
- tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false};
+ *pExpr = NULL;
+ size_t nx = tscNumOfExprs(pQueryInfo);
- int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
-
- // ADD TRUE FOR TEST
- if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+  // a parameter list is required for every function except count
+ if (pSqlExpr->Expr.paramList == NULL && pSqlExpr->functionId != TSDB_FUNC_COUNT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- ++pQueryInfo->havingFieldNum;
-
- size_t n = tscSqlExprNumOfExprs(pQueryInfo);
- SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1);
+ tSqlExprItem *pParam = NULL;
+ SSchema schema = {0};
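+  // resolve the schema of the function's argument; a missing argument list implies count(*), which targets the timestamp column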
- int32_t slot = tscNumOfFields(pQueryInfo) - 1;
- SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot);
- pInfo->visible = false;
-
- if (pInfo->pFieldFilters == NULL) {
- SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter));
- if (pFieldFilters == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- SColumn* pFilters = calloc(1, sizeof(SColumn));
- if (pFilters == NULL) {
- tfree(pFieldFilters);
-
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+ if (pSqlExpr->Expr.paramList != NULL) {
+ pParam = taosArrayGet(pSqlExpr->Expr.paramList, 0);
+ SStrToken* pToken = &pParam->pNode->columnName;
- pFieldFilters->pFilters = pFilters;
- pFieldFilters->pSqlExpr = pSqlExpr;
- pSqlExpr->pFilter = pFilters;
- pInfo->pFieldFilters = pFieldFilters;
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+ getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ schema = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
+ } else {
+ schema = (SSchema) {.colId = PRIMARYKEY_TIMESTAMP_COL_INDEX, .type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE};
}
- pInfo->pFieldFilters->pExpr = pExpr;
-
- *interField = pInfo;
-
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) {
- SInternalField* pInfo = NULL;
-
- for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) {
- pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i);
-
- if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) {
- *pField = pInfo;
+ for(int32_t i = 0; i < nx; ++i) {
+ SExprInfo* pExprInfo = tscExprGet(pQueryInfo, i);
+ if (pExprInfo->base.functionId == pSqlExpr->functionId && pExprInfo->base.colInfo.colId == schema.colId) {
+ ++pQueryInfo->havingFieldNum;
+ *pExpr = pExprInfo;
return TSDB_CODE_SUCCESS;
}
}
- int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo);
- if (ret) {
- return ret;
- }
-
- *pField = pInfo;
+// size_t num = taosArrayGetSize(pSelectNodeList);
+// for(int32_t i = 0; i < num; ++i) {
+// tSqlExprItem* pItem = taosArrayGet(pSelectNodeList, i);
+//
+// if (tSqlExprCompare(pItem->pNode, pSqlExpr) == 0) { // exists, not added it,
+//
+// SColumnIndex index = COLUMN_INDEX_INITIALIZER;
+// int32_t functionId = pSqlExpr->functionId;
+// if (pSqlExpr->Expr.paramList == NULL) {
+// index.columnIndex = 0;
+// index.tableIndex = 0;
+// } else {
+// tSqlExprItem* pParamElem = taosArrayGet(pSqlExpr->Expr.paramList, 0);
+// SStrToken* pToken = &pParamElem->pNode->columnName;
+// getColumnIndexByName(pToken, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
+// }
+//
+// size_t numOfNodeInSel = tscNumOfExprs(pQueryInfo);
+// for(int32_t k = 0; k < numOfNodeInSel; ++k) {
+// SExprInfo* pExpr1 = tscExprGet(pQueryInfo, k);
+//
+// if (pExpr1->base.functionId != functionId) {
+// continue;
+// }
+//
+// if (pExpr1->base.colInfo.colIndex != index.columnIndex) {
+// continue;
+// }
+//
+// ++pQueryInfo->havingFieldNum;
+// *pExpr = pExpr1;
+// break;
+// }
+//
+// assert(*pExpr != NULL);
+// return TSDB_CODE_SUCCESS;
+// }
+// }
- return TSDB_CODE_SUCCESS;
-}
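+  // no matching expression exists: append a new one dedicated to the having clause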
+ tSqlExprItem item = {.pNode = pSqlExpr, .aliasName = NULL, .distinct = false};
-static int32_t genExprFilter(SExprFilter * exprFilter) {
- exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t));
- if (exprFilter->fp == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+ int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
- for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) {
- SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i];
+ // ADD TRUE FOR TEST
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true, NULL) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
- int32_t lower = filterInfo->lowerRelOptr;
- int32_t upper = filterInfo->upperRelOptr;
- if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) {
- tscError("invalid rel optr");
- return TSDB_CODE_TSC_APP_ERROR;
- }
+ ++pQueryInfo->havingFieldNum;
- __filter_func_t ffp = getFilterOperator(lower, upper);
- if (ffp == NULL) {
- tscError("invalid filter info");
- return TSDB_CODE_TSC_APP_ERROR;
- }
+ size_t n = tscNumOfExprs(pQueryInfo);
+ *pExpr = tscExprGet(pQueryInfo, (int32_t)n - 1);
- taosArrayPush(exprFilter->fp, &ffp);
- }
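+  // the appended helper field is internal only; hide it from the user-visible result set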
+ SInternalField* pField = taosArrayGetLast(pQueryInfo->fieldsInfo.internalField);
+ pField->visible = false;
return TSDB_CODE_SUCCESS;
}
-static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) {
+static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t sqlOptr) {
const char* msg1 = "non binary column not support like operator";
- const char* msg2 = "invalid operator for binary column in having clause";
+ const char* msg2 = "invalid operator for binary column in having clause";
const char* msg3 = "invalid operator for bool column in having clause";
- SColumn* pColumn = NULL;
SColumnFilterInfo* pColFilter = NULL;
- SInternalField* pInfo = NULL;
-
+ // TODO refactor: validate the expression
/*
   * in case of a TK_AND filter condition, we first find the corresponding column and combine the new
   * query condition with the already existing condition.
*/
+ SExprInfo *expr = NULL;
if (sqlOptr == TK_AND) {
- int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
+ int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, &expr);
if (ret) {
return ret;
}
- pColumn = pInfo->pFieldFilters->pFilters;
-
// this is a new filter condition on this column
- if (pColumn->numOfFilters == 0) {
- pColFilter = addColumnFilterInfo(pColumn);
+ if (expr->base.flist.numOfFilters == 0) {
+ pColFilter = addColumnFilterInfo(&expr->base.flist);
} else { // update the existed column filter information, find the filter info here
- pColFilter = &pColumn->filterInfo[0];
+ pColFilter = &expr->base.flist.filterInfo[0];
}
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else if (sqlOptr == TK_OR) {
- int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo);
+ int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, &expr);
if (ret) {
return ret;
}
- pColumn = pInfo->pFieldFilters->pFilters;
-
     // TODO fixme: failed to invalidate the filter expression: "col1 = 1 OR col2 = 2"
- pColFilter = addColumnFilterInfo(pColumn);
+ // TODO refactor
+ pColFilter = addColumnFilterInfo(&expr->base.flist);
if (pColFilter == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
} else { // error;
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
pColFilter->filterstr =
- ((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
+ ((expr->base.resType == TSDB_DATA_TYPE_BINARY || expr->base.resType == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
if (pColFilter->filterstr) {
if (pExpr->tokenId != TK_EQ
- && pExpr->tokenId != TK_NE
- && pExpr->tokenId != TK_ISNULL
- && pExpr->tokenId != TK_NOTNULL
- && pExpr->tokenId != TK_LIKE
- ) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ && pExpr->tokenId != TK_NE
+ && pExpr->tokenId != TK_ISNULL
+ && pExpr->tokenId != TK_NOTNULL
+ && pExpr->tokenId != TK_LIKE
+ ) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->tokenId == TK_LIKE) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) {
+
+ if (expr->base.resType == TSDB_DATA_TYPE_BOOL) {
if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
@@ -7011,15 +7972,16 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
- int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pTableMeta, pColFilter, pInfo->field.type, pExpr);
+ int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pTableMeta->tableInfo.precision, pColFilter,
+ expr->base.resType, pExpr);
if (ret) {
- return ret;
+ return ret;
}
-
- return genExprFilter(pInfo->pFieldFilters);
+
+ return TSDB_CODE_SUCCESS;
}
-int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) {
+int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -7030,88 +7992,83 @@ int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, in
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) {
- int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
+ int32_t ret = getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr->pLeft, pExpr->tokenId);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId);
+ return getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr->pRight, pExpr->tokenId);
}
if (pLeft == NULL || pRight == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pLeft->type == pRight->type) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
exchangeExpr(pExpr);
- pLeft = pExpr->pLeft;
+ pLeft = pExpr->pLeft;
pRight = pExpr->pRight;
-
-
if (pLeft->type != SQL_NODE_SQLFUNCTION) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
+
if (pRight->type != SQL_NODE_VALUE) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->tokenId >= TK_BITAND) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- //if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) {
- // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- //}
-
- if (pLeft->pParam) {
- size_t size = taosArrayGetSize(pLeft->pParam);
+ if (pLeft->Expr.paramList) {
+ size_t size = taosArrayGetSize(pLeft->Expr.paramList);
for (int32_t i = 0; i < size; i++) {
- tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i);
- if (pParamElem->pNode->tokenId != TK_ALL &&
- pParamElem->pNode->tokenId != TK_ID &&
- pParamElem->pNode->tokenId != TK_STRING &&
- pParamElem->pNode->tokenId != TK_INTEGER &&
- pParamElem->pNode->tokenId != TK_FLOAT) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ tSqlExprItem* pParamItem = taosArrayGet(pLeft->Expr.paramList, i);
+
+ tSqlExpr* pExpr1 = pParamItem->pNode;
+ if (pExpr1->tokenId != TK_ALL &&
+ pExpr1->tokenId != TK_ID &&
+ pExpr1->tokenId != TK_STRING &&
+ pExpr1->tokenId != TK_INTEGER &&
+ pExpr1->tokenId != TK_FLOAT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
- if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+
+ if (pExpr1->tokenId == TK_ID && (pExpr1->columnName.z == NULL && pExpr1->columnName.n == 0)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- if (pParamElem->pNode->tokenId == TK_ID) {
+ if (pExpr1->tokenId == TK_ID) {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if ((getColumnIndexByName(&pExpr1->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
-
+
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
-
- if (index.columnIndex <= 0 ||
- index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+
+ if (index.columnIndex <= 0 ||
+ index.columnIndex >= tscGetNumOfColumns(pTableMeta)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
}
}
- pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n);
+ pLeft->functionId = isValidFunction(pLeft->Expr.operand.z, pLeft->Expr.operand.n);
if (pLeft->functionId < 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr);
+ return handleExprInHavingClause(pCmd, pQueryInfo, pSelectNodeList, pExpr, parentOptr);
}
-
-
-int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) {
+int32_t validateHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, SArray* pSelectNodeList,
+ int32_t joinQuery, int32_t timeWindowQuery) {
const char* msg1 = "having only works with group by";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "invalid expression in having clause";
@@ -7121,11 +8078,11 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd
}
if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pExpr->pLeft == NULL || pExpr->pRight == NULL) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->colList == NULL) {
@@ -7133,277 +8090,824 @@ int32_t parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd
}
int32_t ret = 0;
-
- if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
+
+ if ((ret = getHavingExpr(pCmd, pQueryInfo, pSelectNodeList, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
// redo the function compatibility check
if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
}
+static int32_t getTableNameFromSqlNode(SSqlNode* pSqlNode, SArray* tableNameList, char* msgBuf, SSqlObj* pSql) {
+ const char* msg1 = "invalid table name";
+ int32_t numOfTables = (int32_t) taosArrayGetSize(pSqlNode->from->list);
+ assert(pSqlNode->from->type == SQL_NODE_FROM_TABLELIST);
+ for(int32_t j = 0; j < numOfTables; ++j) {
+ SRelElementPair* item = taosArrayGet(pSqlNode->from->list, j);
+ SStrToken* t = &item->tableName;
+ if (t->type == TK_INTEGER || t->type == TK_FLOAT) {
+ return invalidOperationMsg(msgBuf, msg1);
+ }
-int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) {
- assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0));
-
- const char* msg0 = "invalid table name";
- const char* msg1 = "point interpolation query needs timestamp";
- const char* msg2 = "fill only available for interval query";
- const char* msg3 = "start(end) time of query range required or time range too large";
- const char* msg5 = "too many columns in selection clause";
- const char* msg6 = "too many tables in from clause";
- const char* msg7 = "invalid table alias name";
- const char* msg8 = "alias name too long";
- const char* msg9 = "only tag query not compatible with normal column filter";
-
- int32_t code = TSDB_CODE_SUCCESS;
+ tscDequoteAndTrimToken(t);
+ if (tscValidateName(t) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(msgBuf, msg1);
+ }
- SSqlCmd* pCmd = &pSql->cmd;
+ SName name = {0};
+ int32_t code = tscSetTableFullName(&name, t, pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index);
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- if (pTableMetaInfo == NULL) {
- pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
+ taosArrayPush(tableNameList, &name);
}
- assert(pCmd->clauseIndex == index);
+ return TSDB_CODE_SUCCESS;
+}
- // too many result columns not support order by in query
- if (taosArrayGetSize(pQuerySqlNode->pSelectList) > TSDB_MAX_COLUMNS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
- }
+static int32_t getTableNameFromSubquery(SSqlNode* pSqlNode, SArray* tableNameList, char* msgBuf, SSqlObj* pSql) {
+ int32_t numOfSub = (int32_t) taosArrayGetSize(pSqlNode->from->list);
- /*
- * handle the sql expression without from subclause
- * select current_database();
- * select server_version();
- * select client_version();
- * select server_state();
- */
- if (pQuerySqlNode->from == NULL) {
- assert(pQuerySqlNode->fillType == NULL && pQuerySqlNode->pGroupby == NULL && pQuerySqlNode->pWhere == NULL &&
- pQuerySqlNode->pSortOrder == NULL);
- return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySqlNode);
+ for(int32_t j = 0; j < numOfSub; ++j) {
+ SRelElementPair* sub = taosArrayGet(pSqlNode->from->list, j);
+
+ int32_t num = (int32_t)taosArrayGetSize(sub->pSubquery);
+ for (int32_t i = 0; i < num; ++i) {
+ SSqlNode* p = taosArrayGetP(sub->pSubquery, i);
+ if (p->from == NULL) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (p->from->type == SQL_NODE_FROM_TABLELIST) {
+ int32_t code = getTableNameFromSqlNode(p, tableNameList, msgBuf, pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ } else {
+ getTableNameFromSubquery(p, tableNameList, msgBuf, pSql);
+ }
+ }
}
- size_t fromSize = taosArrayGetSize(pQuerySqlNode->from->tableList);
- if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return TSDB_CODE_SUCCESS;
+}
+
+void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
+static void freeElem(void* p) {
+ tfree(*(char**)p);
+}
+
+int32_t tnameComparFn(const void* p1, const void* p2) {
+ SName* pn1 = (SName*)p1;
+ SName* pn2 = (SName*)p2;
+
+ int32_t ret = strncmp(pn1->acctId, pn2->acctId, tListLen(pn1->acctId));
+ if (ret != 0) {
+ return ret > 0? 1:-1;
+ } else {
+ ret = strncmp(pn1->dbname, pn2->dbname, tListLen(pn1->dbname));
+ if (ret != 0) {
+ return ret > 0? 1:-1;
+ } else {
+ ret = strncmp(pn1->tname, pn2->tname, tListLen(pn1->tname));
+ if (ret != 0) {
+ return ret > 0? 1:-1;
+ } else {
+ return 0;
+ }
+ }
+ }
+}
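The comparator above orders SName entries lexicographically by account id, then database name, then table name, so that taosArraySort followed by taosArrayRemoveDuplicate can drop repeated names in a single pass. A small standalone sketch of the same pattern, using a simplified stand-in struct (field names and sizes below are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified stand-in for SName; field sizes are illustrative only */
typedef struct {
  char acctId[16];
  char dbname[16];
  char tname[32];
} MiniName;

/* lexicographic comparison: account id, then db name, then table name */
static int miniNameCompar(const void *p1, const void *p2) {
  const MiniName *a = p1, *b = p2;
  int ret = strncmp(a->acctId, b->acctId, sizeof(a->acctId));
  if (ret != 0) return ret > 0 ? 1 : -1;
  ret = strncmp(a->dbname, b->dbname, sizeof(a->dbname));
  if (ret != 0) return ret > 0 ? 1 : -1;
  ret = strncmp(a->tname, b->tname, sizeof(a->tname));
  return (ret != 0) ? (ret > 0 ? 1 : -1) : 0;
}

int main(void) {
  MiniName names[] = {
      {"acct1", "db1", "t2"}, {"acct1", "db1", "t1"}, {"acct1", "db1", "t1"}};
  qsort(names, 3, sizeof(MiniName), miniNameCompar);
  /* after sorting, duplicates are adjacent and easy to drop in one pass */
  for (int i = 0; i < 3; ++i)
    printf("%s.%s.%s\n", names[i].acctId, names[i].dbname, names[i].tname);
  return 0;
}
```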
+
+int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ // the table meta has already been loaded from the local buffer or from the mnode
+ if (pCmd->pTableMetaMap != NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ SArray* tableNameList = NULL;
+ SArray* pVgroupList = NULL;
+ SArray* plist = NULL;
+ STableMeta* pTableMeta = NULL;
+ size_t tableMetaCapacity = 0;
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+
+ pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+
+ tableNameList = taosArrayInit(4, sizeof(SName));
+ size_t size = taosArrayGetSize(pInfo->list);
+ for (int32_t i = 0; i < size; ++i) {
+ SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i);
+ if (pSqlNode->from == NULL) {
+ goto _end;
+ }
+
+ // load the table meta in the from clause
+ if (pSqlNode->from->type == SQL_NODE_FROM_TABLELIST) {
+ code = getTableNameFromSqlNode(pSqlNode, tableNameList, tscGetErrorMsgPayload(pCmd), pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
+ }
+ } else {
+ code = getTableNameFromSubquery(pSqlNode, tableNameList, tscGetErrorMsgPayload(pCmd), pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
+ }
+ }
}
- pQueryInfo->command = TSDB_SQL_SELECT;
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+
+ //if (!pSql->pBuf) {
+ // if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) {
+ // code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ // goto _end;
+ // }
+ //}
+
+ plist = taosArrayInit(4, POINTER_BYTES);
+ pVgroupList = taosArrayInit(4, POINTER_BYTES);
+
+ taosArraySort(tableNameList, tnameComparFn);
+ taosArrayRemoveDuplicate(tableNameList, tnameComparFn, NULL);
- // set all query tables, which are maybe more than one.
- for (int32_t i = 0; i < fromSize; ++i) {
- STableNamePair* item = taosArrayGet(pQuerySqlNode->from->tableList, i);
- SStrToken* pTableItem = &item->name;
+ size_t numOfTables = taosArrayGetSize(tableNameList);
+ for (int32_t i = 0; i < numOfTables; ++i) {
+ SName* pname = taosArrayGet(tableNameList, i);
+ tNameExtractFullName(pname, name);
- if (pTableItem->type != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ size_t len = strlen(name);
+
+ if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity)){
+ tfree(pTableMeta);
+ tableMetaCapacity = 0;
}
- tscDequoteAndTrimToken(pTableItem);
+ if (pTableMeta && pTableMeta->id.uid > 0) {
+ tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
+
+ // avoid a memory leak; pTableMeta may need to be updated here
+ void* pVgroupIdList = NULL;
+ if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
+ code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity);
+
+ // create the child table meta from super table failed, try load it from mnode
+ if (code != TSDB_CODE_SUCCESS) {
+ char* t = strdup(name);
+ taosArrayPush(plist, &t);
+ continue;
+ }
+ } else if (pTableMeta->tableType == TSDB_SUPER_TABLE) {
+ // the vgroup list of a super table is not kept in the local buffer, so it has to be retrieved from the mnode each time
+ tscDebug("0x%"PRIx64" try to acquire cached super table %s vgroup id list", pSql->self, name);
+ void* pv = taosCacheAcquireByKey(tscVgroupListBuf, name, len);
+ if (pv == NULL) {
+ char* t = strdup(name);
+ taosArrayPush(pVgroupList, &t);
+ tscDebug("0x%"PRIx64" failed to retrieve stable %s vgroup id list in cache, try fetch from mnode", pSql->self, name);
+ } else {
+ tFilePage* pdata = (tFilePage*) pv;
+ pVgroupIdList = taosArrayInit((size_t) pdata->num, sizeof(int32_t));
+ if (pVgroupIdList == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ taosArrayAddBatch(pVgroupIdList, pdata->data, (int32_t) pdata->num);
+ taosCacheRelease(tscVgroupListBuf, &pv, false);
+ }
+ }
- SStrToken tableName = {.z = pTableItem->z, .n = pTableItem->n, .type = TK_STRING};
- if (tscValidateName(&tableName) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ if (taosHashGet(pCmd->pTableMetaMap, name, len) == NULL) {
+ STableMeta* pMeta = tscTableMetaDup(pTableMeta);
+ STableMetaVgroupInfo tvi = { .pTableMeta = pMeta, .vgroupIdList = pVgroupIdList};
+ taosHashPut(pCmd->pTableMetaMap, name, len, &tvi, sizeof(STableMetaVgroupInfo));
+ }
+ } else {
+ // Add it to the list of table metas that need to be retrieved.
+ // If the tableMeta is missing, the cached vgroup list for the corresponding super table will be ignored.
+ tscDebug("0x%"PRIx64" failed to retrieve table meta %s from local buf", pSql->self, name);
+
+ char* t = strdup(name);
+ taosArrayPush(plist, &t);
}
+ }
+ size_t funcSize = 0;
+ if (pInfo->funcs) {
+ funcSize = taosArrayGetSize(pInfo->funcs);
+ }
+
+ if (funcSize > 0) {
+ for (size_t i = 0; i < funcSize; ++i) {
+ SStrToken* t = taosArrayGet(pInfo->funcs, i);
+ if (NULL == t) {
+ continue;
+ }
+
+ if (t->n >= TSDB_FUNC_NAME_LEN) {
+ code = tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), "function name too long", t->z);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
+ }
+ }
+
+ int32_t functionId = isValidFunction(t->z, t->n);
+ if (functionId < 0) {
+ struct SUdfInfo info = {0};
+ info.name = strndup(t->z, t->n);
+ if (pQueryInfo->pUdfInfo == NULL) {
+ pQueryInfo->pUdfInfo = taosArrayInit(4, sizeof(struct SUdfInfo));
+ }
+
+ info.functionId = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo) * (-1) - 1;
+ taosArrayPush(pQueryInfo->pUdfInfo, &info);
+ }
+ }
+ }
+
+ // load the table meta for a given table name list
+ if (taosArrayGetSize(plist) > 0 || taosArrayGetSize(pVgroupList) > 0 || (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0)) {
+ code = getMultiTableMetaFromMnode(pSql, plist, pVgroupList, pQueryInfo->pUdfInfo, tscTableMetaCallBack, true);
+ }
+
+_end:
+ if (plist != NULL) {
+ taosArrayDestroyEx(plist, freeElem);
+ }
+
+ if (pVgroupList != NULL) {
+ taosArrayDestroyEx(pVgroupList, freeElem);
+ }
+
+ if (tableNameList != NULL) {
+ taosArrayDestroy(tableNameList);
+ }
+
+ tfree(pTableMeta);
+
+ return code;
+}
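loadAllTableMeta follows a cache-first pattern: each table name is looked up in the local table-meta buffer, and only the misses are queued so that one batched request to the mnode can resolve them all. A hedged sketch of that check-cache-else-queue idea, with a hypothetical cacheLookup standing in for taosHashGetCloneExt:

```c
#include <stdbool.h>
#include <stdio.h>

/* hypothetical local cache lookup; returns true on a hit */
static bool cacheLookup(const char *name) {
  (void)name;
  return false; /* pretend the meta is not cached */
}

/* collect the names whose meta must be fetched remotely in one batch */
static int collectMissing(const char *names[], int n, const char *missing[]) {
  int k = 0;
  for (int i = 0; i < n; ++i) {
    if (!cacheLookup(names[i])) {
      missing[k++] = names[i]; /* queue for a single batched mnode request */
    }
  }
  return k;
}

int main(void) {
  const char *names[] = {"db.t1", "db.t2"};
  const char *missing[2];
  int k = collectMissing(names, 2, missing);
  printf("%d table meta(s) to fetch from mnode\n", k);
  return 0;
}
```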
+
+static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, int32_t numOfTables) {
+ const char* msg1 = "invalid table name";
+ const char* msg2 = "invalid table alias name";
+ const char* msg3 = "alias name too long";
+ const char* msg4 = "self join not allowed";
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ if (numOfTables > taosHashGetSize(pCmd->pTableMetaMap)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ for (int32_t i = 0; i < numOfTables; ++i) {
if (pQueryInfo->numOfTables <= i) { // more than one table
tscAddEmptyMetaInfo(pQueryInfo);
}
- STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, i);
- code = tscSetTableFullName(pTableMetaInfo1, pTableItem, pSql);
+ SRelElementPair *item = taosArrayGet(pSqlNode->from->list, i);
+ SStrToken *oriName = &item->tableName;
+
+ if (oriName->type == TK_INTEGER || oriName->type == TK_FLOAT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ tscDequoteAndTrimToken(oriName);
+ if (tscValidateName(oriName) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
+ code = tscSetTableFullName(&pTableMetaInfo->name, oriName, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
SStrToken* aliasName = &item->aliasName;
if (TPARSER_HAS_TOKEN(*aliasName)) {
- if (aliasName->type != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ if (aliasName->type == TK_INTEGER || aliasName->type == TK_FLOAT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tscDequoteAndTrimToken(aliasName);
-
- SStrToken aliasName1 = {.z = aliasName->z, .n = aliasName->n, .type = TK_STRING};
- if (tscValidateName(&aliasName1) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ if (tscValidateName(aliasName) != TSDB_CODE_SUCCESS || aliasName->n >= TSDB_TABLE_NAME_LEN) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- if (aliasName1.n >= TSDB_TABLE_NAME_LEN) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
- }
-
- strncpy(pTableMetaInfo1->aliasName, aliasName1.z, aliasName1.n);
+ strncpy(pTableMetaInfo->aliasName, aliasName->z, aliasName->n);
} else {
- strncpy(pTableMetaInfo1->aliasName, tNameGetTableName(&pTableMetaInfo1->name), tListLen(pTableMetaInfo1->aliasName));
+ strncpy(pTableMetaInfo->aliasName, tNameGetTableName(&pTableMetaInfo->name), tListLen(pTableMetaInfo->aliasName));
}
- code = tscGetTableMeta(pSql, pTableMetaInfo1);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ char fname[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, fname);
+ STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN));
+
+ pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta);
+ pTableMetaInfo->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo->pTableMeta);
+ assert(pTableMetaInfo->pTableMeta != NULL);
+
+ if (p->vgroupIdList != NULL) {
+ size_t s = taosArrayGetSize(p->vgroupIdList);
+
+ size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo);
+ pTableMetaInfo->vgroupList = calloc(1, vgroupsz);
+ if (pTableMetaInfo->vgroupList == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pTableMetaInfo->vgroupList->numOfVgroups = (int32_t) s;
+ for(int32_t j = 0; j < s; ++j) {
+ int32_t* id = taosArrayGet(p->vgroupIdList, j);
+
+ // check if current buffer contains the vgroup info. If not, add it
+ SNewVgroupInfo existVgroupInfo = {.inUse = -1,};
+ taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo);
+
+ assert(existVgroupInfo.inUse >= 0);
+ SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j];
+
+ pVgroup->numOfEps = existVgroupInfo.numOfEps;
+ pVgroup->vgId = existVgroupInfo.vgId;
+ for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) {
+ pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port;
+ pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN);
+ }
+ }
}
}
- assert(pQueryInfo->numOfTables == taosArrayGetSize(pQuerySqlNode->from->tableList));
- bool isSTable = false;
-
- if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- isSTable = true;
- code = tscGetSTableVgroupInfo(pSql, index);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ return code;
+}
+
+static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
+ STableMetaInfo* pUpstreamTableMetaInfo = tscGetMetaInfo(pUpstream, 0);
+
+ int32_t numOfColumns = pUpstream->fieldsInfo.numOfOutput;
+ STableMeta *meta = calloc(1, sizeof(STableMeta) + sizeof(SSchema) * numOfColumns);
+ meta->tableType = TSDB_TEMP_TABLE;
+
+ STableComInfo *info = &meta->tableInfo;
+ info->numOfColumns = numOfColumns;
+ info->precision = pUpstreamTableMetaInfo->pTableMeta->tableInfo.precision;
+ info->numOfTags = 0;
+
+ int32_t n = 0;
+ for(int32_t i = 0; i < numOfColumns; ++i) {
+ SInternalField* pField = tscFieldInfoGetInternalField(&pUpstream->fieldsInfo, i);
+ if (!pField->visible) {
+ continue;
}
-
- TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_QUERY);
- } else {
- TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY);
- }
- // parse the group by clause in the first place
- if (parseGroupbyClause(pQueryInfo, pQuerySqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ meta->schema[n].bytes = pField->field.bytes;
+ meta->schema[n].type = pField->field.type;
+
+ SExprInfo* pExpr = pField->pExpr;
+ meta->schema[n].colId = pExpr->base.resColId;
+ tstrncpy(meta->schema[n].name, pField->pExpr->base.aliasName, TSDB_COL_NAME_LEN);
+ info->rowSize += meta->schema[n].bytes;
+
+ n += 1;
}
+ info->numOfColumns = n;
+ return meta;
+}
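extractTempTableMetaFromSubquery allocates the meta header and the trailing schema array in one calloc, sized by the number of output columns. A minimal sketch of that single-allocation idiom using a C99 flexible array member (the MiniTableMeta type below is a hypothetical stand-in for STableMeta):

```c
#include <stdio.h>
#include <stdlib.h>

/* hypothetical mirror of the STableMeta layout: header plus trailing schema array */
typedef struct { char name[32]; int type; int bytes; } MiniSchema;
typedef struct { int numOfColumns; MiniSchema schema[]; } MiniTableMeta;

static MiniTableMeta *makeTempMeta(int numOfColumns) {
  /* one allocation covers the header and the column array, as in the patch */
  MiniTableMeta *meta = calloc(1, sizeof(MiniTableMeta) + sizeof(MiniSchema) * numOfColumns);
  if (meta == NULL) return NULL;
  meta->numOfColumns = numOfColumns;
  for (int i = 0; i < numOfColumns; ++i) {
    snprintf(meta->schema[i].name, sizeof(meta->schema[i].name), "col%d", i);
  }
  return meta;
}

int main(void) {
  MiniTableMeta *m = makeTempMeta(3);
  if (m) {
    printf("%d columns, first: %s\n", m->numOfColumns, m->schema[0].name);
    free(m);
  }
  return 0;
}
```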
- // set where info
- STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pSql, SQueryInfo* pQueryInfo, char* msgBuf) {
+ SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, index);
- if (pQuerySqlNode->pWhere != NULL) {
- if (parseWhereClause(pQueryInfo, &pQuerySqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
- }
+ // union all is not supported currently
+ SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
+ if (taosArrayGetSize(subInfo->pSubquery) >= 2) {
+ return invalidOperationMsg(msgBuf, "union in subquery is not supported");
+ }
- pQuerySqlNode->pWhere = NULL;
- if (tinfo.precision == TSDB_TIME_PRECISION_MILLI && (!TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER))) {
- pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
- pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
- }
- } else { // set the time rang
- if (taosArrayGetSize(pQuerySqlNode->from->tableList) > 1) { // it is a join query, no where clause is not allowed.
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "condition missing for join query ");
- }
+ SQueryInfo* pSub = calloc(1, sizeof(SQueryInfo));
+ tscInitQueryInfo(pSub);
+
+ SArray *pUdfInfo = NULL;
+ if (pQueryInfo->pUdfInfo) {
+ pUdfInfo = taosArrayDup(pQueryInfo->pUdfInfo);
}
- int32_t joinQuery = (pQuerySqlNode->from != NULL && taosArrayGetSize(pQuerySqlNode->from->tableList) > 1);
- int32_t timeWindowQuery =
- (TPARSER_HAS_TOKEN(pQuerySqlNode->interval.interval) || TPARSER_HAS_TOKEN(pQuerySqlNode->sessionVal.gap));
+ pSub->pUdfInfo = pUdfInfo;
+ pSub->udfCopy = true;
- if (parseSelectClause(pCmd, index, pQuerySqlNode->pSelectList, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ pSub->pDownstream = pQueryInfo;
+ taosArrayPush(pQueryInfo->pUpstream, &pSub);
+ int32_t code = validateSqlNode(pSql, p, pSub);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
- // set order by info
- if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ // create dummy table meta info
+ STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
+ if (pTableMetaInfo1 == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- // set interval value
- if (parseIntervalClause(pSql, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
- } else {
- if (isTimeWindowQuery(pQueryInfo) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
+ pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
+
+ if (subInfo->aliasName.n > 0) {
+ if (subInfo->aliasName.n >= TSDB_TABLE_FNAME_LEN) {
+ tfree(pTableMetaInfo1);
+ return invalidOperationMsg(msgBuf, "subquery alias name too long");
}
+
+ tstrncpy(pTableMetaInfo1->aliasName, subInfo->aliasName.z, subInfo->aliasName.n + 1);
}
- // parse the having clause in the first place
- if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ // NOTE: mixed ordering in subqueries is not supported yet.
+ pQueryInfo->order = pSub->order;
+
+ STableMetaInfo** tmp = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
+ if (tmp == NULL) {
+ tfree(pTableMetaInfo1);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pQueryInfo->pTableMetaInfo = tmp;
+
+ pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo1;
+ pQueryInfo->numOfTables += 1;
+
+ // all columns are added into the table column list
+ STableMeta* pMeta = pTableMetaInfo1->pTableMeta;
+ int32_t startOffset = (int32_t) taosArrayGetSize(pQueryInfo->colList);
+
+ for(int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) {
+ tscColumnListInsert(pQueryInfo->colList, i + startOffset, pMeta->id.uid, &pMeta->schema[i]);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
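doValidateSubquery wires the subquery's SQueryInfo into the parent in both directions: the child records the parent as its downstream consumer, and the parent pushes the child onto its upstream list. A toy sketch of that two-way linkage, under the assumption of a simplified node type (none of these names are from the patch):

```c
#include <stdio.h>

/* hypothetical minimal mirror of the upstream/downstream wiring above */
typedef struct QueryNode {
  const char *label;
  struct QueryNode *downstream;   /* the consumer of this node's output */
  struct QueryNode *upstream[4];  /* producers feeding this node */
  int numUpstream;
} QueryNode;

static void linkSubquery(QueryNode *parent, QueryNode *sub) {
  sub->downstream = parent;                      /* pSub->pDownstream = pQueryInfo */
  parent->upstream[parent->numUpstream++] = sub; /* taosArrayPush(pQueryInfo->pUpstream, &pSub) */
}

int main(void) {
  QueryNode outer = {.label = "outer"}, inner = {.label = "inner"};
  linkSubquery(&outer, &inner);
  printf("%s feeds %s\n", inner.label, inner.downstream->label);
  return 0;
}
```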
+
+int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInfo) {
+ assert(pSqlNode != NULL && (pSqlNode->from == NULL || taosArrayGetSize(pSqlNode->from->list) > 0));
+
+ const char* msg1 = "point interpolation query needs timestamp";
+ const char* msg2 = "too many tables in from clause";
+ const char* msg3 = "start(end) time of query range required or time range too large";
+ const char* msg4 = "interval query not supported, since the result of the subquery does not include a valid timestamp column";
+ const char* msg5 = "only tag query not compatible with normal column filter";
+ const char* msg6 = "stddev/percentile/interp are not supported in the outer query yet";
+ const char* msg7 = "derivative/twa/irate requires a timestamp column in the subquery";
+ const char* msg8 = "condition missing for join query";
+ const char* msg9 = "not support 3 level select";
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ if (pTableMetaInfo == NULL) {
+ pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
}
/*
- * transfer sql functions that need secondary merge into another format
- * in dealing with super table queries such as: count/first/last
+ * handle the sql expression without from subclause
+ * select server_status();
+ * select server_version();
+ * select client_version();
+ * select current_database();
*/
- if (isSTable) {
- tscTansformFuncForSTableQuery(pQueryInfo);
-
- if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (pSqlNode->from == NULL) {
+ assert(pSqlNode->fillType == NULL && pSqlNode->pGroupby == NULL && pSqlNode->pWhere == NULL &&
+ pSqlNode->pSortOrder == NULL);
+ return doLocalQueryProcess(pCmd, pQueryInfo, pSqlNode);
+ }
+
+ if (pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
+ clearAllTableMetaInfo(pQueryInfo, false);
+ pQueryInfo->numOfTables = 0;
+
+ // parse the subquery in the first place
+ int32_t numOfSub = (int32_t)taosArrayGetSize(pSqlNode->from->list);
+ for (int32_t i = 0; i < numOfSub; ++i) {
+ // check if there is 3 level select
+ SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
+ SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
+ if (p->from->type == SQL_NODE_FROM_SUBQUERY) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ }
+
+ code = doValidateSubquery(pSqlNode, i, pSql, pQueryInfo, tscGetErrorMsgPayload(pCmd));
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+
+ int32_t timeWindowQuery =
+ (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap));
+ TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TABLE_QUERY);
+
+ // parse the group by clause in the first place
+ if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, timeWindowQuery, true) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ // todo: not supported yet
+ for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int32_t f = pExpr->base.functionId;
+ if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT || f == TSDB_FUNC_INTERP) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+
+ if ((timeWindowQuery || pQueryInfo->stateWindow) && f == TSDB_FUNC_LAST) {
+ pExpr->base.numOfParams = 1;
+ pExpr->base.param[0].i64 = TSDB_ORDER_ASC;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT;
+ }
+ }
+
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, 0);
+
+ if (pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) {
+ int32_t numOfExprs = (int32_t)tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+
+ int32_t f = pExpr->base.functionId;
+ if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
+ }
+ }
+
+ // validate the query filter condition info
+ if (pSqlNode->pWhere != NULL) {
+ if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ } else {
+ if (pQueryInfo->numOfTables > 1) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
+ }
+ }
+
+ // validate the interval info
+ if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ } else {
+ if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ // parse the window_state
+ if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (isTimeWindowQuery(pQueryInfo)) {
+ // check if the first column of the nested query result is a timestamp column
+ SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
+ if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
+ if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ }
+ }
+
+ // disable group result mixed up if interval/session window query exists.
+ if (isTimeWindowQuery(pQueryInfo)) {
+ size_t num = taosArrayGetSize(pQueryInfo->pUpstream);
+ for(int32_t i = 0; i < num; ++i) {
+ SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, i);
+ pUp->multigroupResult = false;
+ }
+ }
+
+ // parse the having clause in the first place
+ int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
+ if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if ((code = validateLimitNode(pCmd, pQueryInfo, pSqlNode, pSql)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // set order by info
+ if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if ((code = validateFunctionFromUpstream(pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+// updateFunctionInterBuf(pQueryInfo, false);
+ updateLastScanOrderIfNeeded(pQueryInfo);
+
+ if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ } else {
+ pQueryInfo->command = TSDB_SQL_SELECT;
+
+ size_t numOfTables = taosArrayGetSize(pSqlNode->from->list);
+ if (numOfTables > TSDB_MAX_JOIN_TABLE_NUM) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
+ // set all query tables, which are maybe more than one.
+ code = doLoadAllTableMeta(pSql, pQueryInfo, pSqlNode, (int32_t) numOfTables);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
+
+ int32_t type = isSTable? TSDB_QUERY_TYPE_STABLE_QUERY:TSDB_QUERY_TYPE_TABLE_QUERY;
+ TSDB_QUERY_SET_TYPE(pQueryInfo->type, type);
+
+ // parse the group by clause in the first place
+ if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ pQueryInfo->onlyHasTagCond = true;
+ // set where info
+ if (pSqlNode->pWhere != NULL) {
+ if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ pSqlNode->pWhere = NULL;
+ } else {
+ if (taosArrayGetSize(pSqlNode->from->list) > 1) { // Cross join not allowed yet
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "cross join not supported yet");
+ }
}
- if(tscQueryTags(pQueryInfo)) {
- SSqlExpr* pExpr1 = tscSqlExprGet(pQueryInfo, 0);
+ int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
+ int32_t timeWindowQuery =
+ (TPARSER_HAS_TOKEN(pSqlNode->interval.interval) || TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap));
- if (pExpr1->functionId != TSDB_FUNC_TID_TAG) {
+ if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery, false) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ // parse the window_state
+ if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ // set order by info
+ if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMetaInfo->pTableMeta)) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ // set interval value
+ if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (tscQueryTags(pQueryInfo)) {
+ SExprInfo* pExpr1 = tscExprGet(pQueryInfo, 0);
+
+ if (pExpr1->base.functionId != TSDB_FUNC_TID_TAG) {
int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryInfo->colList);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
- if (pCols->numOfFilters > 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ if (pCols->info.flist.numOfFilters > 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}
}
- }
- if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
- }
+ // parse the having clause in the first place
+ if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
+ TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
- // no result due to invalid query time range
- if (pQueryInfo->window.skey > pQueryInfo->window.ekey) {
- pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
- return TSDB_CODE_SUCCESS;
- }
+ /*
+ * transfer sql functions that need a secondary merge into another format
+ * when dealing with super table queries such as count/first/last
+ */
+ if (validateSessionNode(pCmd, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
- if (!hasTimestampForPointInterpQuery(pQueryInfo)) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
+ if (isTimeWindowQuery(pQueryInfo) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
- // in case of join query, time range is required.
- if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
- int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
- if (timeRange == 0 && pQueryInfo->window.skey == 0) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ if (isSTable) {
+ tscTansformFuncForSTableQuery(pQueryInfo);
+ if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
}
- }
- if ((code = parseLimitClause(pCmd, pQueryInfo, index, pQuerySqlNode, pSql)) != TSDB_CODE_SUCCESS) {
- return code;
- }
+ // no result due to invalid query time range
+ if (pQueryInfo->window.skey > pQueryInfo->window.ekey) {
+ pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
+ return TSDB_CODE_SUCCESS;
+ }
- if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
- return code;
- }
+ if (!hasTimestampForPointInterpQuery(pQueryInfo)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
- updateLastScanOrderIfNeeded(pQueryInfo);
- tscFieldInfoUpdateOffset(pQueryInfo);
+ // in case of join query, time range is required.
+ if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
+ uint64_t timeRange = (uint64_t)pQueryInfo->window.ekey - pQueryInfo->window.skey;
+ if (timeRange == 0 && pQueryInfo->window.skey == 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ }
+ }
- if (pQuerySqlNode->fillType != NULL) {
- if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ if ((code = validateLimitNode(pCmd, pQueryInfo, pSqlNode, pSql)) != TSDB_CODE_SUCCESS) {
+ return code;
}
- /*
- * fill options are set at the end position, when all columns are set properly
- * the columns may be increased due to group by operation
- */
- if ((code = checkQueryRangeForFill(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
+ if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
return code;
}
- if ((code = parseFillClause(pCmd, pQueryInfo, pQuerySqlNode)) != TSDB_CODE_SUCCESS) {
+ updateLastScanOrderIfNeeded(pQueryInfo);
+ tscFieldInfoUpdateOffset(pQueryInfo);
+// updateFunctionInterBuf(pQueryInfo, isSTable);
+
+ if ((code = validateFillNode(pCmd, pQueryInfo, pSqlNode)) != TSDB_CODE_SUCCESS) {
return code;
}
}
+ { // set the query info
+ pQueryInfo->projectionQuery = tscIsProjectionQuery(pQueryInfo);
+ pQueryInfo->hasFilter = tscHasColumnFilter(pQueryInfo);
+ pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
+ pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
+ pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
+ pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
+ pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
+
+ SExprInfo** p = NULL;
+ int32_t numOfExpr = 0;
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr);
+ if (pQueryInfo->exprList1 == NULL) {
+ pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
+ }
+
+ taosArrayAddBatch(pQueryInfo->exprList1, (void*) p, numOfExpr);
+ tfree(p);
+ }
+
+#if 0
+ SQueryNode* p = qCreateQueryPlan(pQueryInfo);
+ char* s = queryPlanToString(p);
+ printf("%s\n", s);
+ tfree(s);
+ qDestroyQueryPlan(p);
+#endif
+
return TSDB_CODE_SUCCESS; // Does not build query message here
}
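validateSqlNode runs the clause validators in a fixed order and aborts on the first failure; roughly group by, select list, where, window clauses, having, limit, order by, and fill (the exact order differs slightly between the subquery branch and the plain-table branch). A compact sketch of such a fail-fast validator pipeline, with stub validators standing in for the real ones:

```c
#include <stdio.h>

/* illustrative only: chain clause validators and stop at the first failure,
 * in roughly the order used by validateSqlNode above */
typedef int (*validator_fn)(void);

static int okGroupBy(void)  { return 0; }
static int okSelect(void)   { return 0; }
static int okWhere(void)    { return 0; }
static int okWindow(void)   { return 0; }
static int okHaving(void)   { return 0; }
static int okLimit(void)    { return 0; }
static int okOrderBy(void)  { return 0; }
static int okFill(void)     { return 0; }

int main(void) {
  validator_fn steps[] = {okGroupBy, okSelect, okWhere, okWindow,
                          okHaving,  okLimit,  okOrderBy, okFill};
  for (int i = 0; i < 8; ++i) {
    int code = steps[i]();
    if (code != 0) { printf("validation failed at step %d\n", i); return code; }
  }
  printf("all clauses validated\n");
  return 0;
}
```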
-int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid) {
+int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, uint64_t *uid) {
tExprNode* pLeft = NULL;
tExprNode* pRight= NULL;
+ SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (pSqlExpr->pLeft != NULL) {
int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pQueryInfo, pCols, uid);
@@ -7415,6 +8919,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
if (pSqlExpr->pRight != NULL) {
int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pQueryInfo, pCols, uid);
if (ret != TSDB_CODE_SUCCESS) {
+ tExprTreeDestroy(pLeft, NULL);
return ret;
}
}
@@ -7424,55 +8929,81 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
return TSDB_CODE_SUCCESS;
}
- if (pSqlExpr->pLeft == NULL) {
+ if (pSqlExpr->pLeft == NULL) { // it is the leaf node
+ assert(pSqlExpr->pRight == NULL);
+
if (pSqlExpr->type == SQL_NODE_VALUE) {
+ int32_t ret = TSDB_CODE_SUCCESS;
*pExpr = calloc(1, sizeof(tExprNode));
(*pExpr)->nodeType = TSQL_NODE_VALUE;
(*pExpr)->pVal = calloc(1, sizeof(tVariant));
-
tVariantAssign((*pExpr)->pVal, &pSqlExpr->value);
- return TSDB_CODE_SUCCESS;
+
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx)->pTableMeta;
+ if (pCols != NULL) {
+ size_t colSize = taosArrayGetSize(pCols);
+
+ if (colSize > 0) {
+ SColIndex* idx = taosArrayGet(pCols, colSize - 1);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
+ // convert time by precision
+ if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) {
+ ret = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, (*pExpr)->pVal);
+ }
+ }
+ }
+ return ret;
} else if (pSqlExpr->type == SQL_NODE_SQLFUNCTION) {
// arithmetic expression on the results of aggregation functions
*pExpr = calloc(1, sizeof(tExprNode));
(*pExpr)->nodeType = TSQL_NODE_COL;
(*pExpr)->pSchema = calloc(1, sizeof(SSchema));
- strncpy((*pExpr)->pSchema->name, pSqlExpr->token.z, pSqlExpr->token.n);
+ strncpy((*pExpr)->pSchema->name, pSqlExpr->exprToken.z, pSqlExpr->exprToken.n);
// set the input column data byte and type.
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* p1 = taosArrayGetP(pQueryInfo->exprList, i);
+ SExprInfo* p1 = taosArrayGetP(pQueryInfo->exprList, i);
- if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) {
- (*pExpr)->pSchema->type = (uint8_t)p1->resType;
- (*pExpr)->pSchema->bytes = p1->resBytes;
- (*pExpr)->pSchema->colId = p1->resColId;
+ if (strcmp((*pExpr)->pSchema->name, p1->base.aliasName) == 0) {
+ (*pExpr)->pSchema->type = (uint8_t)p1->base.resType;
+ (*pExpr)->pSchema->bytes = p1->base.resBytes;
+ (*pExpr)->pSchema->colId = p1->base.resColId;
if (uid != NULL) {
- *uid = p1->uid;
+ *uid = p1->base.uid;
}
break;
}
}
} else if (pSqlExpr->type == SQL_NODE_TABLE_COLUMN) { // column name, normal column arithmetic expression
- SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index);
+ int32_t ret = getColumnIndexByName(&pSqlExpr->columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd));
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
+ pQueryInfo->curTableIdx = index.tableIndex;
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index.tableIndex)->pTableMeta;
int32_t numOfColumns = tscGetNumOfColumns(pTableMeta);
*pExpr = calloc(1, sizeof(tExprNode));
(*pExpr)->nodeType = TSQL_NODE_COL;
(*pExpr)->pSchema = calloc(1, sizeof(SSchema));
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
- *(*pExpr)->pSchema = *pSchema;
+ SSchema* pSchema = NULL;
+
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ pSchema = (*pExpr)->pSchema;
+ strcpy(pSchema->name, TSQL_TBNAME_L);
+ pSchema->type = TSDB_DATA_TYPE_BINARY;
+ pSchema->colId = TSDB_TBNAME_COLUMN_INDEX;
+ pSchema->bytes = -1;
+ } else {
+ pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ *(*pExpr)->pSchema = *pSchema;
+ }
if (pCols != NULL) { // record the involved columns
SColIndex colIndex = {0};
@@ -7485,8 +9016,40 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}
return TSDB_CODE_SUCCESS;
+ } else if (pSqlExpr->tokenId == TK_SET) {
+ int32_t colType = -1;
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx)->pTableMeta;
+ if (pCols != NULL) {
+ size_t colSize = taosArrayGetSize(pCols);
+
+ if (colSize > 0) {
+ SColIndex* idx = taosArrayGet(pCols, colSize - 1);
+ if (idx->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ colType = TSDB_DATA_TYPE_BINARY;
+ } else {
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
+ if (pSchema != NULL) {
+ colType = pSchema->type;
+ }
+ }
+ }
+ }
+ tVariant *pVal;
+ if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) {
+ colType = TSDB_DATA_TYPE_BIGINT;
+ } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
+ colType = TSDB_DATA_TYPE_DOUBLE;
+ }
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx);
+ STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
+ if (serializeExprListToVariant(pSqlExpr->Expr.paramList, &pVal, colType, tinfo.precision) == false) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
+ }
+ *pExpr = calloc(1, sizeof(tExprNode));
+ (*pExpr)->nodeType = TSQL_NODE_VALUE;
+ (*pExpr)->pVal = pVal;
} else {
- return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
}
} else {
@@ -7498,7 +9061,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
(*pExpr)->_node.pRight = pRight;
SStrToken t = {.type = pSqlExpr->tokenId};
- (*pExpr)->_node.optr = convertOptr(&t);
+ (*pExpr)->_node.optr = convertRelationalOperator(&t);
assert((*pExpr)->_node.optr != 0);
@@ -7506,9 +9069,9 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
if ((*pExpr)->_node.optr == TSDB_BINARY_OP_DIVIDE) {
if (pRight->nodeType == TSQL_NODE_VALUE) {
if (pRight->pVal->nType == TSDB_DATA_TYPE_INT && pRight->pVal->i64 == 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
} else if (pRight->pVal->nType == TSDB_DATA_TYPE_FLOAT && pRight->pVal->dKey == 0) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
@@ -7516,8 +9079,8 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
// NOTE: binary|nchar data allows the >|< type filter
if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) {
if (pRight != NULL && pRight->nodeType == TSQL_NODE_VALUE) {
- if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (pRight->pVal->nType == TSDB_DATA_TYPE_BOOL && pLeft->pSchema->type == TSDB_DATA_TYPE_BOOL) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
}
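exprTreeFromSqlExpr builds the filter tree bottom-up: it recurses into the left and right subexpressions first, then attaches them under an operator node, rejecting a constant zero divisor on the spot. A small sketch of that construction and guard, using a toy Node type rather than the real tExprNode:

```c
#include <stdio.h>
#include <stdlib.h>

/* a minimal expression node, standing in for tExprNode */
typedef struct Node {
  char op;                 /* 0 for a leaf value */
  double val;              /* leaf payload */
  struct Node *left, *right;
} Node;

static Node *leaf(double v) {
  Node *n = calloc(1, sizeof(Node));
  n->val = v;
  return n;
}

/* build an operator node, rejecting a constant zero divisor up front,
 * as exprTreeFromSqlExpr does for TSDB_BINARY_OP_DIVIDE */
static Node *binary(char op, Node *l, Node *r) {
  if (op == '/' && r->op == 0 && r->val == 0) {
    free(l); free(r);
    return NULL; /* invalid expression */
  }
  Node *n = calloc(1, sizeof(Node));
  n->op = op; n->left = l; n->right = r;
  return n;
}

int main(void) {
  Node *bad = binary('/', leaf(1), leaf(0));
  printf("1/0 accepted? %s\n", bad ? "yes" : "no");
  return 0;
}
```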
@@ -7530,10 +9093,44 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
- if (pCol->numOfFilters > 0) {
+ if (pCol->info.flist.numOfFilters > 0) {
return true;
}
}
return false;
}
+
+#if 0
+void normalizeSqlNode(SSqlNode* pSqlNode, const char* dbName) {
+ assert(pSqlNode != NULL);
+
+ if (pSqlNode->from->type == SQL_NODE_FROM_TABLELIST) {
+// SRelElementPair *item = taosArrayGet(pSqlNode->from->list, 0);
+// item->TableName.name;
+ }
+
+ // 1. pSqlNode->pSelNodeList
+ if (pSqlNode->pSelNodeList != NULL && taosArrayGetSize(pSqlNode->pSelNodeList) > 0) {
+ SArray* pSelNodeList = pSqlNode->pSelNodeList;
+ size_t numOfExpr = taosArrayGetSize(pSelNodeList);
+ for (int32_t i = 0; i < numOfExpr; ++i) {
+ tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
+ int32_t type = pItem->pNode->type;
+ if (type == SQL_NODE_VALUE || type == SQL_NODE_EXPR) {
+ continue;
+ }
+
+ if (type == SQL_NODE_TABLE_COLUMN) {
+ }
+ }
+ }
+
+// 2. pSqlNode->pWhere
+// 3. pSqlNode->pHaving
+// 4. pSqlNode->pSortOrder
+// pSqlNode->from
+}
+
+#endif
+
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 599767803c9e80c110bce2e10987d864a071ed26..0239805e9c7de7d8ffb2ad3b6175792fdd6c2acf 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -13,17 +13,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include
#include "os.h"
+#include "qPlan.h"
+#include "qTableMeta.h"
#include "tcmdtype.h"
+#include "tlockfree.h"
#include "trpc.h"
-#include "tscLocalMerge.h"
+#include "tscGlobalmerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscUtil.h"
-#include "tschemautil.h"
#include "tsclient.h"
#include "ttimer.h"
-#include "tlockfree.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@@ -46,6 +48,31 @@ static int32_t getWaitingTimeInterval(int32_t count) {
return initial * ((2u)<<(count - 2));
}
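getWaitingTimeInterval above drives the retry path further down in this file: the first attempt waits nothing, and each later retry doubles the delay. A runnable sketch of the same doubling schedule (the 100 ms base is an assumption for the example; the real initial value is defined elsewhere in the file):

```c
#include <stdio.h>

/* illustrative exponential backoff, patterned after getWaitingTimeInterval:
 * no wait on the first try, then initial * 2^(count-2) afterwards */
static int backoffMs(int count) {
  int initial = 100; /* assumed base interval for this example */
  if (count <= 1) return 0;
  return initial * ((2u) << (count - 2));
}

int main(void) {
  for (int retry = 1; retry <= 5; ++retry) {
    printf("retry %d waits %d ms\n", retry, backoffMs(retry));
  }
  return 0;
}
```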
+static int32_t vgIdCompare(const void *lhs, const void *rhs) {
+ int32_t left = *(int32_t *)lhs;
+ int32_t right = *(int32_t *)rhs;
+
+ if (left == right) {
+ return 0;
+ } else {
+ return left > right ? 1 : -1;
+ }
+}
+static int32_t removeDupVgid(int32_t *src, int32_t sz) {
+ if (src == NULL || sz <= 0) {
+ return 0;
+ }
+ qsort(src, sz, sizeof(src[0]), vgIdCompare);
+
+ int32_t ret = 1;
+ for (int i = 1; i < sz; i++) {
+ if (src[i] != src[i - 1]) {
+ src[ret++] = src[i];
+ }
+ }
+ return ret;
+}
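removeDupVgid sorts first so that equal vgroup ids become adjacent, then compacts the array with a single pass that keeps one element per run and returns the new logical length. The same idea in a self-contained form:

```c
#include <stdio.h>
#include <stdlib.h>

static int cmpInt(const void *a, const void *b) {
  int x = *(const int *)a, y = *(const int *)b;
  return (x > y) - (x < y);
}

/* sort, then keep the first element of every run of equal values;
 * returns the new logical length, as removeDupVgid does */
static int dedupInts(int *src, int n) {
  if (src == NULL || n <= 0) return 0;
  qsort(src, n, sizeof(int), cmpInt);
  int k = 1;
  for (int i = 1; i < n; ++i) {
    if (src[i] != src[i - 1]) src[k++] = src[i];
  }
  return k;
}

int main(void) {
  int ids[] = {5, 3, 5, 1, 3};
  int n = dedupInts(ids, 5);
  for (int i = 0; i < n; ++i) printf("%d ", ids[i]); /* prints: 1 3 5 */
  printf("\n");
  return 0;
}
```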
+
static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
@@ -115,7 +142,7 @@ static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgrou
static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pSql->cmd;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) {
return;
}
@@ -130,14 +157,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) {
assert(vgId > 0);
SNewVgroupInfo vgroupInfo = {.vgId = -1};
- taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
+ taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
vgroupInfo.inUse = pEpSet->inUse;
vgroupInfo.numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; i < vgroupInfo.numOfEps; i++) {
- strncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
+ tstrncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
vgroupInfo.ep[i].port = pEpSet->port[i];
}
@@ -269,7 +296,7 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
assert(pHB->self == pObj->hbrid);
pHB->retry = 0;
- int32_t code = tscProcessSql(pHB);
+ int32_t code = tscBuildAndSendRequest(pHB, NULL);
taosReleaseRef(tscObjRef, pObj->hbrid);
if (code != TSDB_CODE_SUCCESS) {
@@ -310,6 +337,184 @@ int tscSendMsgToServer(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
+//static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
+// SRpcMsg* rpcMsg = pSchedMsg->ahandle;
+// SRpcEpSet* pEpSet = pSchedMsg->thandle;
+//
+// TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
+// SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
+// if (pSql == NULL) {
+// rpcFreeCont(rpcMsg->pCont);
+// free(rpcMsg);
+// free(pEpSet);
+// return;
+// }
+//
+// assert(pSql->self == handle);
+//
+// STscObj *pObj = pSql->pTscObj;
+// SSqlRes *pRes = &pSql->res;
+// SSqlCmd *pCmd = &pSql->cmd;
+//
+// pSql->rpcRid = -1;
+//
+// if (pObj->signature != pObj) {
+// tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature);
+//
+// taosRemoveRef(tscObjRef, handle);
+// taosReleaseRef(tscObjRef, handle);
+// rpcFreeCont(rpcMsg->pCont);
+// free(rpcMsg);
+// free(pEpSet);
+// return;
+// }
+//
+// SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+// if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) {
+// tscDebug("0x%"PRIx64" sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
+// pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
+//
+// taosRemoveRef(tscObjRef, handle);
+// taosReleaseRef(tscObjRef, handle);
+// rpcFreeCont(rpcMsg->pCont);
+// free(rpcMsg);
+// free(pEpSet);
+// return;
+// }
+//
+// if (pEpSet) {
+// if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
+// if (pCmd->command < TSDB_SQL_MGMT) {
+// tscUpdateVgroupInfo(pSql, pEpSet);
+// } else {
+// tscUpdateMgmtEpSet(pSql, pEpSet);
+// }
+// }
+// }
+//
+// int32_t cmd = pCmd->command;
+//
+// // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
+// if (cmd == TSDB_SQL_INSERT && rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
+// pSql->cmd.insertParam.schemaAttached = 1;
+// }
+//
+// // single table query error need to be handled here.
+// if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
+// (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
+// rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
+//
+// // 1. super table subquery
+// // 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
+// if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
+// TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
+// !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
+// (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
+// // do nothing in case of super table subquery
+// } else {
+// pSql->retry += 1;
+// tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
+//
+// pSql->res.code = rpcMsg->code; // keep the previous error code
+// if (pSql->retry > pSql->maxRetry) {
+// tscError("0x%" PRIx64 " max retry %d reached, give up", pSql->self, pSql->maxRetry);
+// } else {
+// // wait for a little bit moment and then retry
+// // todo do not sleep in rpc callback thread, add this process into queue to process
+// if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+// int32_t duration = getWaitingTimeInterval(pSql->retry);
+// taosMsleep(duration);
+// }
+//
+// pSql->retryReason = rpcMsg->code;
+// rpcMsg->code = tscRenewTableMeta(pSql, 0);
+// // if there is an error occurring, proceed to the following error handling procedure.
+// if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+// taosReleaseRef(tscObjRef, handle);
+// rpcFreeCont(rpcMsg->pCont);
+// free(rpcMsg);
+// free(pEpSet);
+// return;
+// }
+// }
+// }
+// }
+//
+// pRes->rspLen = 0;
+//
+// if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
+// tscDebug("0x%"PRIx64" query is cancelled, code:%s", pSql->self, tstrerror(pRes->code));
+// } else {
+// pRes->code = rpcMsg->code;
+// }
+//
+// if (pRes->code == TSDB_CODE_SUCCESS) {
+// tscDebug("0x%"PRIx64" reset retry counter to be 0 due to success rsp, old:%d", pSql->self, pSql->retry);
+// pSql->retry = 0;
+// }
+//
+// if (pRes->code != TSDB_CODE_TSC_QUERY_CANCELLED) {
+// assert(rpcMsg->msgType == pCmd->msgType + 1);
+// pRes->code = rpcMsg->code;
+// pRes->rspType = rpcMsg->msgType;
+// pRes->rspLen = rpcMsg->contLen;
+//
+// if (pRes->rspLen > 0 && rpcMsg->pCont) {
+// char *tmp = (char *)realloc(pRes->pRsp, pRes->rspLen);
+// if (tmp == NULL) {
+// pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+// } else {
+// pRes->pRsp = tmp;
+// memcpy(pRes->pRsp, rpcMsg->pCont, pRes->rspLen);
+// }
+// } else {
+// tfree(pRes->pRsp);
+// }
+//
+// /*
+// * There is not response callback function for submit response.
+// * The actual inserted number of points is the first number.
+// */
+// if (rpcMsg->msgType == TSDB_MSG_TYPE_SUBMIT_RSP && pRes->pRsp != NULL) {
+// SShellSubmitRspMsg *pMsg = (SShellSubmitRspMsg*)pRes->pRsp;
+// pMsg->code = htonl(pMsg->code);
+// pMsg->numOfRows = htonl(pMsg->numOfRows);
+// pMsg->affectedRows = htonl(pMsg->affectedRows);
+// pMsg->failedRows = htonl(pMsg->failedRows);
+// pMsg->numOfFailedBlocks = htonl(pMsg->numOfFailedBlocks);
+//
+// pRes->numOfRows += pMsg->affectedRows;
+// tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql->self, sqlCmd[pCmd->command],
+// tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen);
+// } else {
+// tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen);
+// }
+// }
+//
+// if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) {
+// rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
+// }
+//
+// bool shouldFree = tscShouldBeFreed(pSql);
+// if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+// if (rpcMsg->code != TSDB_CODE_SUCCESS) {
+// pRes->code = rpcMsg->code;
+// }
+// rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
+// (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
+// }
+//
+// if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
+// tscDebug("0x%"PRIx64" sqlObj is automatically freed", pSql->self);
+// taosRemoveRef(tscObjRef, handle);
+// }
+//
+// taosReleaseRef(tscObjRef, handle);
+// rpcFreeCont(rpcMsg->pCont);
+// free(rpcMsg);
+// free(pEpSet);
+//}
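The commented-out variant above sketches a planned refactor: instead of handling the RPC response inline in the callback thread, the message would be packed into a scheduler job (SSchedMsg) and processed later. A toy illustration of that hand-off, using a trivial fixed-size job queue (all names below are illustrative, not from the patch):

```c
#include <stdio.h>

/* a job carries a handler plus its argument, like SSchedMsg's fp/ahandle pair */
typedef struct { void (*fp)(const void *); const void *ahandle; } Job;

static Job queue[8];
static int head = 0, tail = 0;

static void enqueue(void (*fp)(const void *), const void *arg) {
  queue[tail].fp = fp; queue[tail].ahandle = arg; tail = (tail + 1) % 8;
}

static void drain(void) {
  while (head != tail) { queue[head].fp(queue[head].ahandle); head = (head + 1) % 8; }
}

static void handleRsp(const void *arg) { printf("processing response %s\n", (const char *)arg); }

int main(void) {
  enqueue(handleRsp, "rsp-1"); /* the rpc callback thread would stop here */
  drain();                     /* a worker thread processes the job later */
  return 0;
}
```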
+
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
@@ -317,6 +522,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
rpcFreeCont(rpcMsg->pCont);
return;
}
+
assert(pSql->self == handle);
STscObj *pObj = pSql->pTscObj;
@@ -324,7 +530,6 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pSql->cmd;
pSql->rpcRid = -1;
-
if (pObj->signature != pObj) {
tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature);
@@ -334,7 +539,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) {
tscDebug("0x%"PRIx64" sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
@@ -359,42 +564,51 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
// set the flag to denote that sql string needs to be re-parsed and build submit block with table schema
if (cmd == TSDB_SQL_INSERT && rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
- pSql->cmd.submitSchema = 1;
+ pSql->cmd.insertParam.schemaAttached = 1;
}
+ // single table query error need to be handled here.
if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) &&
- (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID ||
- rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
- rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL ||
- rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
-
- pSql->retry++;
- tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
-
- pSql->res.code = rpcMsg->code; // keep the previous error code
- if (pSql->retry > pSql->maxRetry) {
- tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
- } else {
- // wait for a little bit moment and then retry
- // todo do not sleep in rpc callback thread, add this process into queueu to process
- if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
- int32_t duration = getWaitingTimeInterval(pSql->retry);
- taosMsleep(duration);
- }
+ (((rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID)) ||
+ rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
+
+ // 1. super table subquery
+ // 2. nested queries do not update the table meta; retry parsing the sql after cleaning up the local table meta / vgroup id buffer
+ if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
+ TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
+ (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)
+ || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY))) {
+ // do nothing in case of super table subquery
+ } else {
+ pSql->retry += 1;
+ tscWarn("0x%" PRIx64 " it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
+
+ pSql->res.code = rpcMsg->code; // keep the previous error code
+ if (pSql->retry > pSql->maxRetry) {
+ tscError("0x%" PRIx64 " max retry %d reached, give up", pSql->self, pSql->maxRetry);
+ } else {
+ // wait for a little bit moment and then retry
+        // todo: do not sleep in the rpc callback thread; move this step into a task queue instead
+ if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ int32_t duration = getWaitingTimeInterval(pSql->retry);
+ taosMsleep(duration);
+ }
- pSql->retryReason = rpcMsg->code;
- rpcMsg->code = tscRenewTableMeta(pSql, 0);
- // if there is an error occurring, proceed to the following error handling procedure.
- if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, handle);
- rpcFreeCont(rpcMsg->pCont);
- return;
+ pSql->retryReason = rpcMsg->code;
+ rpcMsg->code = tscRenewTableMeta(pSql, 0);
+ // if there is an error occurring, proceed to the following error handling procedure.
+ if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ taosReleaseRef(tscObjRef, handle);
+ rpcFreeCont(rpcMsg->pCont);
+ return;
+ }
}
}
}
pRes->rspLen = 0;
-
+
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
tscDebug("0x%"PRIx64" query is cancelled, code:%s", pSql->self, tstrerror(pRes->code));
} else {
@@ -443,7 +657,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen);
}
}
-
+
if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
}
@@ -457,19 +671,16 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);
}
-
-
if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
tscDebug("0x%"PRIx64" sqlObj is automatically freed", pSql->self);
taosRemoveRef(tscObjRef, handle);
}
taosReleaseRef(tscObjRef, handle);
-
rpcFreeCont(rpcMsg->pCont);
}
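
The retry path in tscProcessMsgFromServer() above sleeps for getWaitingTimeInterval(pSql->retry) before renewing the table meta. That helper's body is not part of this diff; the sketch below shows the kind of bounded exponential backoff it presumably computes (the base interval and cap are assumptions, not values from the TDengine source).

```c
#include <stdint.h>

#define RETRY_BASE_MS 100   /* hypothetical initial wait        */
#define RETRY_CAP_MS  2000  /* hypothetical upper bound on wait */

static int32_t exampleWaitingTimeInterval(int32_t retry) {
  int32_t duration = RETRY_BASE_MS;
  for (int32_t i = 1; i < retry; ++i) {
    duration *= 2;                        /* double per consecutive failure */
    if (duration >= RETRY_CAP_MS) break;  /* never exceed the cap           */
  }
  return (duration > RETRY_CAP_MS) ? RETRY_CAP_MS : duration;
}
```
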
-int doProcessSql(SSqlObj *pSql) {
+int doBuildAndSendMsg(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@@ -479,7 +690,7 @@ int doProcessSql(SSqlObj *pSql) {
pCmd->command == TSDB_SQL_INSERT ||
pCmd->command == TSDB_SQL_CONNECT ||
pCmd->command == TSDB_SQL_HB ||
- pCmd->command == TSDB_SQL_META ||
+ pCmd->command == TSDB_SQL_RETRIEVE_FUNC ||
pCmd->command == TSDB_SQL_STABLEVGROUP) {
pRes->code = tscBuildMsg[pCmd->command](pSql, NULL);
}
@@ -501,13 +712,16 @@ int doProcessSql(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
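
doBuildAndSendMsg() routes each command through the tscBuildMsg[] table of per-command builder functions indexed by pCmd->command. A minimal, self-contained sketch of that dispatch pattern follows; the command names and handlers are illustrative, not the real tables.

```c
typedef int (*buildFp)(void *pSql);

static int buildConnect(void *pSql) { (void)pSql; return 0; }  /* stub */
static int buildInsert(void *pSql)  { (void)pSql; return 0; }  /* stub */

enum { CMD_CONNECT = 0, CMD_INSERT, CMD_MAX };

static buildFp exampleBuildMsg[CMD_MAX] = {
  [CMD_CONNECT] = buildConnect,  /* designated initializers keep the table */
  [CMD_INSERT]  = buildInsert,   /* aligned with the command enum          */
};

/* usage: if (exampleBuildMsg[cmd] != NULL) code = exampleBuildMsg[cmd](pSql); */
```
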
-int tscProcessSql(SSqlObj *pSql) {
+int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
SSqlCmd *pCmd = &pSql->cmd;
uint32_t type = 0;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ if (pQueryInfo == NULL) {
+ pQueryInfo = tscGetQueryInfo(pCmd);
+ }
+
STableMetaInfo *pTableMetaInfo = NULL;
if (pQueryInfo != NULL) {
@@ -519,7 +733,7 @@ int tscProcessSql(SSqlObj *pSql) {
type = pQueryInfo->type;
   // when numOfTables equals 0, it must be a heartbeat
- assert((pQueryInfo->numOfTables == 0 && pQueryInfo->command == TSDB_SQL_HB) || pQueryInfo->numOfTables > 0);
+ assert((pQueryInfo->numOfTables == 0 && (pQueryInfo->command == TSDB_SQL_HB || pSql->cmd.command == TSDB_SQL_RETRIEVE_FUNC)) || pQueryInfo->numOfTables > 0);
}
tscDebug("0x%"PRIx64" SQL cmd:%s will be processed, name:%s, type:%d", pSql->self, sqlCmd[pCmd->command], name, type);
@@ -532,15 +746,16 @@ int tscProcessSql(SSqlObj *pSql) {
return (*tscProcessMsgRsp[pCmd->command])(pSql);
}
- return doProcessSql(pSql);
+ return doBuildAndSendMsg(pSql);
}
int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg *) pSql->cmd.payload;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
- pRetrieveMsg->free = htons(pQueryInfo->type);
- pRetrieveMsg->qId = htobe64(pSql->res.qId);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+
+ pRetrieveMsg->free = htons(pQueryInfo->type);
+ pRetrieveMsg->qId = htobe64(pSql->res.qId);
// todo valid the vgroupId at the client side
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -579,56 +794,40 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
-
- char* pMsg = pSql->cmd.payload;
-
- // NOTE: shell message size should not include SMsgDesc
- int32_t size = pSql->cmd.payloadLen - sizeof(SMsgDesc);
-
- SMsgDesc* pMsgDesc = (SMsgDesc*) pMsg;
- pMsgDesc->numOfVnodes = htonl(1); // always one vnode
-
- pMsg += sizeof(SMsgDesc);
- SSubmitMsg *pShellMsg = (SSubmitMsg *)pMsg;
-
- pShellMsg->header.vgId = htonl(pTableMeta->vgId);
- pShellMsg->header.contLen = htonl(size); // the length not includes the size of SMsgDesc
- pShellMsg->length = pShellMsg->header.contLen;
-
- pShellMsg->numOfBlocks = htonl(pSql->cmd.numOfTablesInSubmit); // number of tables to be inserted
// pSql->cmd.payloadLen is set during copying data into payload
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
SNewVgroupInfo vgroupInfo = {0};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
+ taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
- tscDebug("0x%"PRIx64" build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql->self, pTableMeta->vgId, pSql->cmd.numOfTablesInSubmit,
- pSql->epSet.numOfEps);
+ tscDebug("0x%"PRIx64" submit msg built, numberOfEP:%d", pSql->self, pSql->epSet.numOfEps);
+
return TSDB_CODE_SUCCESS;
}
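
tscBuildSubmitMsg(), like the other message builders in this file, converts every multi-byte field with htons()/htonl()/htobe64() before writing it into the payload, so the wire format is byte-order independent. A small standalone sketch of that convention, with a hypothetical header struct:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

typedef struct ExampleHeader {
  int32_t vgId;     /* target vnode group             */
  int32_t contLen;  /* payload length, excluding this */
} ExampleHeader;

static void encodeExampleHeader(char *buf, int32_t vgId, int32_t contLen) {
  ExampleHeader hdr;
  hdr.vgId    = htonl(vgId);    /* big-endian on the wire */
  hdr.contLen = htonl(contLen);
  memcpy(buf, &hdr, sizeof(hdr));
}
```
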
/*
* for table query, simply return the size <= 1k
*/
-static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
+static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5;
SSqlCmd* pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo);
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
- int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2);
+ int32_t srcTagFilterSize = tscGetTagFilterSerializeLen(pQueryInfo);
+
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? pQueryInfo->tsBuf->fileSize : 0;
int32_t sqlLen = (int32_t) strlen(pSql->sqlstr) + 1;
-
int32_t tableSerialize = 0;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pVgroupTables != NULL) {
@@ -643,18 +842,17 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
tableSerialize = totalTables * sizeof(STableIdInfo);
}
-
SCond* pCond = &pQueryInfo->tagCond.tbnameCond;
if (pCond->len > 0) {
srcColListSize += pCond->len;
}
- return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize +
+ return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize +
exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
-static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg, int32_t *succeed) {
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
+static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg,
+ int32_t *succeed) {
TSKEY dfltKey = htobe64(pQueryMsg->window.skey);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
@@ -683,11 +881,13 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
vgId = pTableMeta->vgId;
SNewVgroupInfo vgroupInfo = {0};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
+ taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
}
- pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
+ if (pSql->epSet.numOfEps > 0){
+ pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
+ }
pQueryMsg->head.vgId = htonl(vgId);
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
@@ -733,318 +933,216 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
return pMsg;
}
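
The numOfEps > 0 guard added in doSerializeTableInfo() above matters because rand() % 0 is undefined behavior; an empty ep set has to be detected before an endpoint is picked. The equivalent check in isolation:

```c
#include <stdlib.h>

static int pickEndpoint(int numOfEps) {
  /* rand() % 0 is undefined behavior, so reject the empty set first */
  return (numOfEps > 0) ? (rand() % numOfEps) : -1;
}
```
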
+// TODO refactor
+static int32_t serializeColFilterInfo(SColumnFilterInfo* pColFilters, int16_t numOfFilters, char** pMsg) {
+ // append the filter information after the basic column information
+ for (int32_t f = 0; f < numOfFilters; ++f) {
+ SColumnFilterInfo *pColFilter = &pColFilters[f];
+
+ SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)(*pMsg);
+ pFilterMsg->filterstr = htons(pColFilter->filterstr);
+
+ (*pMsg) += sizeof(SColumnFilterInfo);
+
+ if (pColFilter->filterstr) {
+ pFilterMsg->len = htobe64(pColFilter->len);
+ memcpy(*pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
+ (*pMsg) += (pColFilter->len + 1); // append the additional filter binary info
+ } else {
+ pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
+ pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
+ }
+
+ pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
+ pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
+
+ if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
+ tscError("invalid filter info");
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
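
serializeColFilterInfo() advances the caller's write position through a char** cursor so that consecutive serializers can append to one buffer without extra bookkeeping. A minimal sketch of the idiom, with a hypothetical fixed-width field writer:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* write one field at *cursor and advance the cursor past it */
static void writeInt32(char **cursor, int32_t v) {
  int32_t net = htonl(v);
  memcpy(*cursor, &net, sizeof(net));
  *cursor += sizeof(net);
}

/* usage: char buf[64]; char *p = buf;
 *        writeInt32(&p, 7); writeInt32(&p, 9);
 *        serialized length is (int32_t)(p - buf). */
```
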
+
+static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, char** pMsg, int64_t id, bool validateColumn) {
+ STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
+
+  // the queried table has been removed and a new table with the same name has been created;
+  // return an error in that case
+ if (pExpr->uid != pTableMeta->id.uid) {
+ tscError("0x%"PRIx64" table has already been destroyed", id);
+ return TSDB_CODE_TSC_INVALID_TABLE_NAME;
+ }
+
+ if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
+ tscError("0x%"PRIx64" table schema is not matched with parsed sql", id);
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ assert(pExpr->resColId < 0);
+ SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg);
+
+ SColIndex* pIndex = &pSqlExpr->colInfo;
+
+ pIndex->colId = htons(pExpr->colInfo.colId);
+ pIndex->colIndex = htons(pExpr->colInfo.colIndex);
+ pIndex->flag = htons(pExpr->colInfo.flag);
+ pSqlExpr->uid = htobe64(pExpr->uid);
+ pSqlExpr->colType = htons(pExpr->colType);
+ pSqlExpr->colBytes = htons(pExpr->colBytes);
+ pSqlExpr->resType = htons(pExpr->resType);
+ pSqlExpr->resBytes = htons(pExpr->resBytes);
+ pSqlExpr->interBytes = htonl(pExpr->interBytes);
+ pSqlExpr->functionId = htons(pExpr->functionId);
+ pSqlExpr->numOfParams = htons(pExpr->numOfParams);
+ pSqlExpr->resColId = htons(pExpr->resColId);
+ pSqlExpr->flist.numOfFilters = htons(pExpr->flist.numOfFilters);
+
+ (*pMsg) += sizeof(SSqlExpr);
+ for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
+ pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType);
+ pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen);
+
+ if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
+ memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen);
+ (*pMsg) += pExpr->param[j].nLen;
+ } else {
+ pSqlExpr->param[j].i64 = htobe64(pExpr->param[j].i64);
+ }
+ }
+
+ serializeColFilterInfo(pExpr->flist.filterInfo, pExpr->flist.numOfFilters, pMsg);
+
+ return TSDB_CODE_SUCCESS;
+}
+
int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
- int32_t size = tscEstimateQueryMsgSize(pSql, pCmd->clauseIndex);
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t size = tscEstimateQueryMsgSize(pSql);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
- return TSDB_CODE_TSC_INVALID_SQL; // todo add test for this
+    tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
+ return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this
}
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
- size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
- if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
- tscError("0x%"PRIx64" illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql->self, (uint64_t)numOfSrcCols,
- tscGetNumOfColumns(pTableMeta));
+ SQueryAttr query = {{0}};
+ tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql);
+ query.vgId = pTableMeta->vgId;
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
- if (pQueryInfo->interval.interval < 0) {
- tscError("0x%"PRIx64" illegal value of aggregation time interval in query msg: %" PRId64, pSql->self, (int64_t)pQueryInfo->interval.interval);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
- if (pQueryInfo->groupbyExpr.numOfGroupCols < 0) {
- tscError("0x%"PRIx64" illegal value of numOfGroupCols in query msg: %d", pSql->self, pQueryInfo->groupbyExpr.numOfGroupCols);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
+ SArray* tableScanOperator = createTableScanPlan(&query);
+ SArray* queryOperator = createExecOperatorPlan(&query);
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));
- int32_t numOfTags = (int32_t)taosArrayGetSize(pTableMetaInfo->tagColList);
+ int32_t numOfTags = query.numOfTags;
int32_t sqlLen = (int32_t) strlen(pSql->sqlstr);
- if (pQueryInfo->order.order == TSDB_ORDER_ASC) {
- pQueryMsg->window.skey = htobe64(pQueryInfo->window.skey);
- pQueryMsg->window.ekey = htobe64(pQueryInfo->window.ekey);
+ if (taosArrayGetSize(tableScanOperator) == 0) {
+ pQueryMsg->tableScanOperator = htonl(-1);
} else {
- pQueryMsg->window.skey = htobe64(pQueryInfo->window.ekey);
- pQueryMsg->window.ekey = htobe64(pQueryInfo->window.skey);
- }
-
- pQueryMsg->order = htons(pQueryInfo->order.order);
- pQueryMsg->orderColId = htons(pQueryInfo->order.orderColId);
- pQueryMsg->fillType = htons(pQueryInfo->fillType);
- pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
- pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
- pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
- pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
- pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
- pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
- pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
- pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
- pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
+ int32_t* tablescanOp = taosArrayGet(tableScanOperator, 0);
+ pQueryMsg->tableScanOperator = htonl(*tablescanOp);
+ }
+
+ pQueryMsg->window.skey = htobe64(query.window.skey);
+ pQueryMsg->window.ekey = htobe64(query.window.ekey);
+
+ pQueryMsg->order = htons(query.order.order);
+ pQueryMsg->orderColId = htons(query.order.orderColId);
+ pQueryMsg->fillType = htons(query.fillType);
+ pQueryMsg->limit = htobe64(query.limit.limit);
+ pQueryMsg->offset = htobe64(query.limit.offset);
+ pQueryMsg->numOfCols = htons(query.numOfCols);
+
+ pQueryMsg->interval.interval = htobe64(query.interval.interval);
+ pQueryMsg->interval.sliding = htobe64(query.interval.sliding);
+ pQueryMsg->interval.offset = htobe64(query.interval.offset);
+ pQueryMsg->interval.intervalUnit = query.interval.intervalUnit;
+ pQueryMsg->interval.slidingUnit = query.interval.slidingUnit;
+ pQueryMsg->interval.offsetUnit = query.interval.offsetUnit;
+
+ pQueryMsg->stableQuery = query.stableQuery;
+ pQueryMsg->topBotQuery = query.topBotQuery;
+ pQueryMsg->groupbyColumn = query.groupbyColumn;
+ pQueryMsg->hasTagResults = query.hasTagResults;
+ pQueryMsg->timeWindowInterpo = query.timeWindowInterpo;
+ pQueryMsg->queryBlockDist = query.queryBlockDist;
+ pQueryMsg->stabledev = query.stabledev;
+ pQueryMsg->tsCompQuery = query.tsCompQuery;
+ pQueryMsg->simpleAgg = query.simpleAgg;
+ pQueryMsg->pointInterpQuery = query.pointInterpQuery;
+ pQueryMsg->needReverseScan = query.needReverseScan;
+ pQueryMsg->stateWindow = query.stateWindow;
+
+ pQueryMsg->numOfTags = htonl(numOfTags);
+ pQueryMsg->sqlstrLen = htonl(sqlLen);
+ pQueryMsg->sw.gap = htobe64(query.sw.gap);
+ pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX);
+
+ pQueryMsg->secondStageOutput = htonl(query.numOfExpr2);
+ pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number
+
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len);
- pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->queryType = htonl(pQueryInfo->type);
- pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
- pQueryMsg->sqlstrLen = htonl(sqlLen);
pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen);
- pQueryMsg->sw.gap = htobe64(pQueryInfo->sessionWindow.gap);
- pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX);
-
- size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
- pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
// set column list ids
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
- char *pMsg = (char *)(pQueryMsg->colList) + numOfCols * sizeof(SColumnInfo);
- SSchema *pSchema = tscGetTableSchema(pTableMeta);
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
- SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
+ char *pMsg = (char *)(pQueryMsg->tableCols) + numOfCols * sizeof(SColumnInfo);
- if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) {
- char n[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, n);
-
-
- tscError("0x%"PRIx64" tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
- pSql->self, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
- pColSchema->name);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColumnInfo *pCol = &query.tableCols[i];
- pQueryMsg->colList[i].colId = htons(pColSchema->colId);
- pQueryMsg->colList[i].bytes = htons(pColSchema->bytes);
- pQueryMsg->colList[i].type = htons(pColSchema->type);
- pQueryMsg->colList[i].numOfFilters = htons(pCol->numOfFilters);
+ pQueryMsg->tableCols[i].colId = htons(pCol->colId);
+ pQueryMsg->tableCols[i].bytes = htons(pCol->bytes);
+ pQueryMsg->tableCols[i].type = htons(pCol->type);
+ pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters);
// append the filter information after the basic column information
- for (int32_t f = 0; f < pCol->numOfFilters; ++f) {
- SColumnFilterInfo *pColFilter = &pCol->filterInfo[f];
-
- SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pMsg;
- pFilterMsg->filterstr = htons(pColFilter->filterstr);
-
- pMsg += sizeof(SColumnFilterInfo);
-
- if (pColFilter->filterstr) {
- pFilterMsg->len = htobe64(pColFilter->len);
- memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
- pMsg += (pColFilter->len + 1); // append the additional filter binary info
- } else {
- pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
- pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
- }
-
- pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
- pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
-
- if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
- tscError("invalid filter info");
- return TSDB_CODE_TSC_INVALID_SQL;
- }
- }
+ serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg);
}
- SSqlFuncMsg *pSqlFuncExpr = (SSqlFuncMsg *)pMsg;
- for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
-
- // the queried table has been removed and a new table with the same name has already been created already
- // return error msg
- if (pExpr->uid != pTableMeta->id.uid) {
- tscError("0x%"PRIx64" table has already been destroyed", pSql->self);
- return TSDB_CODE_TSC_INVALID_TABLE_NAME;
- }
-
- if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
- tscError("0x%"PRIx64" table schema is not matched with parsed sql", pSql->self);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
- assert(pExpr->resColId < 0);
-
- pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId);
- pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
- pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag);
-
- if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
- pSqlFuncExpr->colType = htons(pExpr->resType);
- pSqlFuncExpr->colBytes = htons(pExpr->resBytes);
- } else if (pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
- SSchema *s = tGetTbnameColumnSchema();
-
- pSqlFuncExpr->colType = htons(s->type);
- pSqlFuncExpr->colBytes = htons(s->bytes);
- } else if (pExpr->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) {
- SSchema s = tGetBlockDistColumnSchema();
-
- pSqlFuncExpr->colType = htons(s.type);
- pSqlFuncExpr->colBytes = htons(s.bytes);
- } else {
- SSchema* s = tscGetColumnSchemaById(pTableMeta, pExpr->colInfo.colId);
- pSqlFuncExpr->colType = htons(s->type);
- pSqlFuncExpr->colBytes = htons(s->bytes);
- }
-
- pSqlFuncExpr->functionId = htons(pExpr->functionId);
- pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
- pSqlFuncExpr->resColId = htons(pExpr->resColId);
- if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) {
- pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters);
- } else {
- pSqlFuncExpr->filterNum = 0;
- }
-
- pMsg += sizeof(SSqlFuncMsg);
-
- if (pSqlFuncExpr->filterNum) {
- pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters;
-
- // append the filter information after the basic column information
- for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) {
- SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f];
-
- SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f];
- pFilterMsg->filterstr = htons(pColFilter->filterstr);
-
- if (pColFilter->filterstr) {
- pFilterMsg->len = htobe64(pColFilter->len);
- memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1));
- pMsg += (pColFilter->len + 1); // append the additional filter binary info
- } else {
- pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi);
- pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi);
- }
-
- pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr);
- pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr);
-
- if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) {
- tscError("invalid filter info");
- return TSDB_CODE_TSC_INVALID_SQL;
- }
- }
- }
-
-
- for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
- pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
- pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);
-
- if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
- memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
- pMsg += pExpr->param[j].nLen;
- } else {
- pSqlFuncExpr->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64);
- }
+ for (int32_t i = 0; i < query.numOfOutput; ++i) {
+ code = serializeSqlExpr(&query.pExpr1[i].base, pTableMetaInfo, &pMsg, pSql->self, true);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
}
-
- pSqlFuncExpr = (SSqlFuncMsg *)pMsg;
}
- size_t output = tscNumOfFields(pQueryInfo);
-
- if (tscIsSecondStageQuery(pQueryInfo)) {
- pQueryMsg->secondStageOutput = htonl((int32_t) output);
-
- SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
-
- for (int32_t i = 0; i < output; ++i) {
- SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
- SSqlExpr *pExpr = pField->pSqlExpr;
-
- // this should be switched to projection query
- if (pExpr != NULL) {
- // the queried table has been removed and a new table with the same name has already been created already
- // return error msg
- if (pExpr->uid != pTableMeta->id.uid) {
- tscError("0x%"PRIx64" table has already been destroyed", pSql->self);
- return TSDB_CODE_TSC_INVALID_TABLE_NAME;
- }
-
- if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
- tscError("0x%"PRIx64" table schema is not matched with parsed sql", pSql->self);
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
- pSqlFuncExpr1->numOfParams = 0; // no params for projection query
- pSqlFuncExpr1->functionId = htons(TSDB_FUNC_PRJ);
- pSqlFuncExpr1->colInfo.colId = htons(pExpr->resColId);
- pSqlFuncExpr1->colInfo.flag = htons(TSDB_COL_NORMAL);
-
- bool assign = false;
- for (int32_t f = 0; f < tscSqlExprNumOfExprs(pQueryInfo); ++f) {
- SSqlExpr *pe = tscSqlExprGet(pQueryInfo, f);
- if (pe == pExpr) {
- pSqlFuncExpr1->colInfo.colIndex = htons(f);
- pSqlFuncExpr1->colType = htons(pe->resType);
- pSqlFuncExpr1->colBytes = htons(pe->resBytes);
- assign = true;
- break;
- }
- }
-
- assert(assign);
- pMsg += sizeof(SSqlFuncMsg);
- pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
- } else {
- assert(pField->pArithExprInfo != NULL);
- SExprInfo* pExprInfo = pField->pArithExprInfo;
-
- pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
- pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId);
- pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams);
- pMsg += sizeof(SSqlFuncMsg);
-
- for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
- // todo add log
- pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType);
- pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes);
-
- if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) {
- memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes);
- pMsg += pExprInfo->base.arg[j].argBytes;
- } else {
- pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64);
- }
- }
-
- pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
- }
+ for (int32_t i = 0; i < query.numOfExpr2; ++i) {
+ code = serializeSqlExpr(&query.pExpr2[i].base, pTableMetaInfo, &pMsg, pSql->self, false);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
}
- } else {
- pQueryMsg->secondStageOutput = 0;
}
int32_t succeed = 1;
-
+
// serialize the table info (sid, uid, tags)
- pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg, &succeed);
+ pMsg = doSerializeTableInfo(pQueryMsg, pSql, pTableMetaInfo, pMsg, &succeed);
if (succeed == 0) {
- return TSDB_CODE_TSC_APP_ERROR;
+ code = TSDB_CODE_TSC_APP_ERROR;
+ goto _end;
}
- SSqlGroupbyExpr *pGroupbyExpr = &pQueryInfo->groupbyExpr;
- if (pGroupbyExpr->numOfGroupCols > 0) {
+ SGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr;
+ if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupCols > 0) {
pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
pQueryMsg->orderType = htons(pGroupbyExpr->orderType);
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pCol = taosArrayGet(pGroupbyExpr->columnInfo, j);
-
+
*((int16_t *)pMsg) = htons(pCol->colId);
pMsg += sizeof(pCol->colId);
@@ -1053,48 +1151,29 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
       *((int16_t *)pMsg) = htons(pCol->flag);
pMsg += sizeof(pCol->flag);
-
+
memcpy(pMsg, pCol->name, tListLen(pCol->name));
pMsg += tListLen(pCol->name);
}
}
- if (pQueryInfo->fillType != TSDB_FILL_NONE) {
- for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
- *((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]);
- pMsg += sizeof(pQueryInfo->fillVal[0]);
+ if (query.fillType != TSDB_FILL_NONE) {
+ for (int32_t i = 0; i < query.numOfOutput; ++i) {
+ *((int64_t *)pMsg) = htobe64(query.fillVal[i]);
+ pMsg += sizeof(query.fillVal[0]);
}
}
-
- if (numOfTags != 0) {
- int32_t numOfColumns = tscGetNumOfColumns(pTableMeta);
- int32_t numOfTagColumns = tscGetNumOfTags(pTableMeta);
- int32_t total = numOfTagColumns + numOfColumns;
-
- pSchema = tscGetTableTagSchema(pTableMeta);
-
- for (int32_t i = 0; i < numOfTags; ++i) {
- SColumn *pCol = taosArrayGetP(pTableMetaInfo->tagColList, i);
- SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
- if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
- (!isValidDataType(pColSchema->type))) {
- char n[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, n);
+ if (query.numOfTags > 0 && query.tagColList != NULL) {
+ for (int32_t i = 0; i < query.numOfTags; ++i) {
+ SColumnInfo* pTag = &query.tagColList[i];
- tscError("0x%"PRIx64" tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
- pSql->self, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, pCol->colIndex.columnIndex, pColSchema->name);
-
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
SColumnInfo* pTagCol = (SColumnInfo*) pMsg;
-
- pTagCol->colId = htons(pColSchema->colId);
- pTagCol->bytes = htons(pColSchema->bytes);
- pTagCol->type = htons(pColSchema->type);
- pTagCol->numOfFilters = 0;
-
+ pTagCol->colId = htons(pTag->colId);
+ pTagCol->bytes = htons(pTag->bytes);
+ pTagCol->type = htons(pTag->type);
+ pTagCol->flist.numOfFilters = 0;
+
pMsg += sizeof(SColumnInfo);
}
}
@@ -1102,12 +1181,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// serialize tag column query condition
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) {
STagCond* pTagCond = &pQueryInfo->tagCond;
-
+
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->tagCondLen = htonl(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
-
+
pMsg += pCond->len;
}
}
@@ -1124,21 +1203,58 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
// compressed ts block
- pQueryMsg->tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
+ pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
if (pQueryInfo->tsBuf != NULL) {
// note: here used the index instead of actual vnode id.
int32_t vnodeIndex = pTableMetaInfo->vgroupIndex;
- int32_t code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsLen, &pQueryMsg->tsNumOfBlocks);
+ code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsBuf.tsLen, &pQueryMsg->tsBuf.tsNumOfBlocks);
if (code != TSDB_CODE_SUCCESS) {
- return code;
+ goto _end;
}
- pMsg += pQueryMsg->tsLen;
+ pMsg += pQueryMsg->tsBuf.tsLen;
+
+ pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder);
+ pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
+ pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
+ }
+
+ int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator);
+ pQueryMsg->numOfOperator = htonl(numOfOperator);
+ for(int32_t i = 0; i < numOfOperator; ++i) {
+ int32_t *operator = taosArrayGet(queryOperator, i);
+ *(int32_t*)pMsg = htonl(*operator);
+
+ pMsg += sizeof(int32_t);
+ }
+
+  // only one udf is supported for now
+ if (pQueryInfo->pUdfInfo != NULL && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
+ pQueryMsg->udfContentOffset = htonl((int32_t) (pMsg - pCmd->payload));
+ for(int32_t i = 0; i < taosArrayGetSize(pQueryInfo->pUdfInfo); ++i) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, i);
+ *(int8_t*) pMsg = pUdfInfo->resType;
+ pMsg += sizeof(pUdfInfo->resType);
+
+ *(int16_t*) pMsg = htons(pUdfInfo->resBytes);
+ pMsg += sizeof(pUdfInfo->resBytes);
+
+ STR_TO_VARSTR(pMsg, pUdfInfo->name);
+
+ pMsg += varDataTLen(pMsg);
+
+ *(int32_t*) pMsg = htonl(pUdfInfo->funcType);
+ pMsg += sizeof(pUdfInfo->funcType);
+
+ *(int32_t*) pMsg = htonl(pUdfInfo->bufSize);
+ pMsg += sizeof(pUdfInfo->bufSize);
- pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder);
- pQueryMsg->tsLen = htonl(pQueryMsg->tsLen);
- pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks);
+ pQueryMsg->udfContentLen = htonl(pUdfInfo->contLen);
+ memcpy(pMsg, pUdfInfo->content, pUdfInfo->contLen);
+
+ pMsg += pUdfInfo->contLen;
+ }
}
memcpy(pMsg, pSql->sqlstr, sqlLen);
@@ -1149,11 +1265,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
tscDebug("0x%"PRIx64" msg built success, len:%d bytes", pSql->self, msgLen);
pCmd->payloadLen = msgLen;
pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY;
-
+
pQueryMsg->head.contLen = htonl(msgLen);
assert(msgLen + minMsgSize() <= (int32_t)pCmd->allocSize);
- return TSDB_CODE_SUCCESS;
+ _end:
+ freeQueryAttr(&query);
+ taosArrayDestroy(tableScanOperator);
+ taosArrayDestroy(queryOperator);
+ return code;
}
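
tscBuildQueryMsg() now funnels every exit through the _end: label so the SQueryAttr and both operator arrays are released on success and failure alike. A compact sketch of that goto-cleanup pattern, with hypothetical resources:

```c
#include <stdlib.h>

static int exampleSerializeWithCleanup(void) {
  int   code = 0;
  char *bufA = malloc(32);
  char *bufB = malloc(32);
  if (bufA == NULL || bufB == NULL) {
    code = -1;
    goto _end;      /* the error path still reaches the shared cleanup */
  }
  /* ... serialize into bufA / bufB ... */
_end:
  free(bufA);       /* free(NULL) is a no-op, so both calls are always safe */
  free(bufB);
  return code;
}
```
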
int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
@@ -1164,14 +1284,26 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateDbMsg *pCreateDbMsg = (SCreateDbMsg *)pCmd->payload;
- assert(pCmd->numOfClause == 1);
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+// assert(pCmd->numOfClause == 1);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateDbMsg->db);
assert(code == TSDB_CODE_SUCCESS);
return TSDB_CODE_SUCCESS;
}
+int32_t tscBuildCreateFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
+ SSqlCmd *pCmd = &pSql->cmd;
+ SCreateFuncMsg *pCreateFuncMsg = (SCreateFuncMsg *)pCmd->payload;
+
+ pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_FUNCTION;
+
+ pCmd->payloadLen = sizeof(SCreateFuncMsg) + htonl(pCreateFuncMsg->codeLen);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
@@ -1285,7 +1417,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pDropDbMsg->db);
assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->name.type == TSDB_DB_NAME_T);
@@ -1296,6 +1428,17 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
+int32_t tscBuildDropFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
+ SSqlCmd *pCmd = &pSql->cmd;
+
+ pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_FUNCTION;
+
+ pCmd->payloadLen = sizeof(SDropFuncMsg);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropTableMsg);
@@ -1306,9 +1449,10 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pDropTableMsg->name);
+ pDropTableMsg->supertable = (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE)? 1:0;
pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
return TSDB_CODE_SUCCESS;
@@ -1363,7 +1507,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
@@ -1380,7 +1524,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
}
SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB;
@@ -1388,7 +1532,6 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
}
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW;
pCmd->payloadLen = sizeof(SShowMsg) + 100;
@@ -1398,19 +1541,26 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
+ SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
+ SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
+
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+ if (pShowInfo->showType == TSDB_MGMT_TABLE_FUNCTION) {
+ pShowMsg->type = pShowInfo->showType;
+ pShowMsg->payloadLen = 0;
+ pCmd->payloadLen = sizeof(SShowMsg);
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ return TSDB_CODE_SUCCESS;
+ }
- if (tNameIsEmpty(&pTableMetaInfo->name)) {
- pthread_mutex_lock(&pObj->mutex);
- tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
- pthread_mutex_unlock(&pObj->mutex);
+ if (tNameIsEmpty(&pTableMetaInfo->name)) {
+ char *p = cloneCurrentDBName(pSql);
+ tstrncpy(pShowMsg->db, p, sizeof(pShowMsg->db));
+ tfree(p);
} else {
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
- SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@@ -1474,7 +1624,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSchema *pSchema;
SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// Reallocate the payload size
@@ -1543,7 +1693,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
if (type == TSQL_CREATE_STREAM) { // check if it is a stream sql
- SQuerySqlNode *pQuerySql = pInfo->pCreateTableInfo->pSelect;
+ SSqlNode *pQuerySql = pInfo->pCreateTableInfo->pSelect;
strncpy(pMsg, pQuerySql->sqlstr.z, pQuerySql->sqlstr.n + 1);
pCreateMsg->sqlLen = htons(pQuerySql->sqlstr.n + 1);
@@ -1563,7 +1713,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
return minMsgSize() + sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE;
}
@@ -1572,7 +1722,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int msgLen = 0;
SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -1601,7 +1751,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
- memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ if (pAlterInfo->tagData.dataLen > 0) {
+ memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ }
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
@@ -1621,11 +1773,11 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMeta *pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
SNewVgroupInfo vgroupInfo = {.vgId = -1};
- taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
+ taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo);
assert(vgroupInfo.vgId > 0);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
@@ -1641,11 +1793,65 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
pAlterDbMsg->dbType = -1;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
tNameExtractFullName(&pTableMetaInfo->name, pAlterDbMsg->db);
return TSDB_CODE_SUCCESS;
}
+int tscBuildCompactMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
+ if (pInfo->list == NULL || taosArrayGetSize(pInfo->list) <= 0) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ STscObj *pObj = pSql->pTscObj;
+ SSqlCmd *pCmd = &pSql->cmd;
+ SArray *pList = pInfo->list;
+ int32_t size = (int32_t)taosArrayGetSize(pList);
+
+ int32_t *result = malloc(sizeof(int32_t) * size);
+ if (result == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < size; i++) {
+ tSqlExprItem* pSub = taosArrayGet(pList, i);
+ tVariant* pVar = &pSub->pNode->value;
+ if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
+ result[i] = (int32_t)(pVar->i64);
+ } else {
+ free(result);
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+ }
+
+ int count = removeDupVgid(result, size);
+ pCmd->payloadLen = sizeof(SCompactMsg) + count * sizeof(int32_t);
+ pCmd->msgType = TSDB_MSG_TYPE_CM_COMPACT_VNODE;
+
+ if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
+ free(result);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ SCompactMsg *pCompactMsg = (SCompactMsg *)pCmd->payload;
+
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+
+ if (tNameIsEmpty(&pTableMetaInfo->name)) {
+ pthread_mutex_lock(&pObj->mutex);
+ tstrncpy(pCompactMsg->db, pObj->db, sizeof(pCompactMsg->db));
+ pthread_mutex_unlock(&pObj->mutex);
+ } else {
+ tNameGetFullDbName(&pTableMetaInfo->name, pCompactMsg->db);
+ }
+
+ pCompactMsg->numOfVgroup = htons(count);
+ for (int32_t i = 0; i < count; i++) {
+ pCompactMsg->vgid[i] = htons(result[i]);
+ }
+ free(result);
+
+ return TSDB_CODE_SUCCESS;
+}
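
tscBuildCompactMsg() relies on removeDupVgid() (not shown in this diff) to deduplicate the user-supplied vgroup ids before sizing the message. One plausible implementation, sort then compact in place, is sketched below; the real helper may differ.

```c
#include <stdint.h>
#include <stdlib.h>

static int cmpInt32(const void *a, const void *b) {
  int32_t x = *(const int32_t *)a, y = *(const int32_t *)b;
  return (x > y) - (x < y);  /* overflow-safe three-way compare */
}

static int exampleRemoveDupVgid(int32_t *vgid, int size) {
  if (size <= 1) return size;
  qsort(vgid, size, sizeof(int32_t), cmpInt32);
  int count = 1;
  for (int i = 1; i < size; ++i) {
    if (vgid[i] != vgid[count - 1]) {
      vgid[count++] = vgid[i];  /* keep the first occurrence of each id */
    }
  }
  return count;  /* distinct ids are now packed at the front */
}
```
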
int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@@ -1657,7 +1863,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg*)pCmd->payload;
pRetrieveMsg->qId = htobe64(pSql->res.qId);
pRetrieveMsg->free = htons(pQueryInfo->type);
@@ -1681,12 +1887,12 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) {
pRes->row = 0;
pRes->rspType = 1;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
return pRes->code;
}
- tscSetResRawPtr(pRes, pQueryInfo);
+ tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted);
} else {
tscResetForNextRetrieve(pRes);
}
@@ -1705,7 +1911,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) {
int tscProcessDescribeTableRsp(SSqlObj *pSql) {
SSqlCmd * pCmd = &pSql->cmd;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@@ -1719,7 +1925,7 @@ int tscProcessLocalRetrieveRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, numOfRes);
}
-int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
+int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd* pCmd = &pSql->cmd;
@@ -1729,16 +1935,30 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
return code;
}
- pRes->code = tscDoLocalMerge(pSql);
+ if (pRes->pMerger == NULL) { // no result from subquery, so abort here directly.
+ (*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
+ return code;
+ }
+
+  // global aggregation may be the upstream operator of the parent query
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
+ if (pQueryInfo->pQInfo == NULL) {
+ STableGroupInfo tableGroupInfo = {.numOfTables = 1, .pGroupList = taosArrayInit(1, POINTER_BYTES),};
+ tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+
+ STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN};
+
+ SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(group, &tableKeyInfo);
+ taosArrayPush(tableGroupInfo.pGroupList, &group);
- if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- tscCreateResPointerInfo(pRes, pQueryInfo);
- tscSetResRawPtr(pRes, pQueryInfo);
+ tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self);
+ pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self);
}
- pRes->row = 0;
- pRes->completed = (pRes->numOfRows == 0);
+ uint64_t localQueryId = pSql->self;
+ qTableQuery(pQueryInfo->pQInfo, &localQueryId);
+ convertQueryResult(pRes, pQueryInfo, pSql->self, true);
code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {
@@ -1784,86 +2004,35 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
-int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
-
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
-
- int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
- if (code != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
- }
-
- pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
-
- char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
-
- if (pCmd->autoCreated && pCmd->tagData.dataLen != 0) {
- pMsg = serializeTagData(&pCmd->tagData, pMsg);
- }
-
- pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
- pCmd->msgType = TSDB_MSG_TYPE_CM_TABLE_META;
-
- return TSDB_CODE_SUCCESS;
-}
-
/**
* multi table meta req pkg format:
- * | SMgmtHead | SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ......
- * no used 4B
+ * |SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ......
+ * 4B
**/
-int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
-#if 0
+int tscBuildMultiTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
- // copy payload content to temp buff
- char *tmpData = 0;
- if (pCmd->payloadLen > 0) {
- if ((tmpData = calloc(1, pCmd->payloadLen + 1)) == NULL) return -1;
- memcpy(tmpData, pCmd->payload, pCmd->payloadLen);
- }
-
- // fill head info
- SMgmtHead *pMgmt = (SMgmtHead *)(pCmd->payload + tsRpcHeadSize);
- memset(pMgmt->db, 0, TSDB_TABLE_FNAME_LEN); // server don't need the db
-
- SMultiTableInfoMsg *pInfoMsg = (SMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead));
- pInfoMsg->numOfTables = htonl((int32_t)pCmd->count);
-
- if (pCmd->payloadLen > 0) {
- memcpy(pInfoMsg->tableIds, tmpData, pCmd->payloadLen);
- }
-
- tfree(tmpData);
-
- pCmd->payloadLen += sizeof(SMgmtHead) + sizeof(SMultiTableInfoMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META;
-
assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize);
- tscDebug("0x%"PRIx64" build load multi-metermeta msg completed, numOfTables:%d, msg size:%d", pSql->self, pCmd->count,
+ tscDebug("0x%"PRIx64" build load multi-tablemeta msg completed, numOfTables:%d, msg size:%d", pSql->self, pCmd->count,
pCmd->payloadLen);
return pCmd->payloadLen;
-#endif
- return 0;
}
int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char* pMsg = pCmd->payload;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
SSTableVgroupMsg *pStableVgroupMsg = (SSTableVgroupMsg *)pMsg;
pStableVgroupMsg->numOfTables = htonl(pQueryInfo->numOfTables);
pMsg += sizeof(SSTableVgroupMsg);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, i);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pMsg);
assert(code == TSDB_CODE_SUCCESS);
@@ -1876,6 +2045,29 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
+int tscBuildRetrieveFuncMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
+ SSqlCmd *pCmd = &pSql->cmd;
+
+ char *pMsg = pCmd->payload;
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ int32_t numOfFuncs = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo);
+
+ SRetrieveFuncMsg *pRetrieveFuncMsg = (SRetrieveFuncMsg *)pMsg;
+ pRetrieveFuncMsg->num = htonl(numOfFuncs);
+
+ pMsg += sizeof(SRetrieveFuncMsg);
+ for(int32_t i = 0; i < numOfFuncs; ++i) {
+ SUdfInfo* pUdf = taosArrayGet(pQueryInfo->pUdfInfo, i);
+ STR_TO_NET_VARSTR(pMsg, pUdf->name);
+ pMsg += varDataNetTLen(pMsg);
+ }
+
+ pCmd->msgType = TSDB_MSG_TYPE_CM_RETRIEVE_FUNC;
+ pCmd->payloadLen = (int32_t)(pMsg - pCmd->payload);
+
+ return TSDB_CODE_SUCCESS;
+}
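
STR_TO_NET_VARSTR() and varDataNetTLen() operate on TDengine's var-string layout, a 16-bit length prefix followed by the bytes; the exact macro behavior is assumed here rather than quoted. A hedged sketch of writing one such field with the length in network byte order:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static char *writeNetVarStr(char *pMsg, const char *s) {
  uint16_t len    = (uint16_t)strlen(s);
  uint16_t netLen = htons(len);
  memcpy(pMsg, &netLen, sizeof(netLen));  /* 2-byte length prefix */
  memcpy(pMsg + sizeof(netLen), s, len);  /* payload bytes follow */
  return pMsg + sizeof(netLen) + len;     /* next write position  */
}
```
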
+
int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
STscObj *pObj = pSql->pTscObj;
@@ -1923,18 +2115,15 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
-int tscProcessTableMetaRsp(SSqlObj *pSql) {
- STableMetaMsg *pMetaMsg = (STableMetaMsg *)pSql->res.pRsp;
-
- pMetaMsg->tid = htonl(pMetaMsg->tid);
- pMetaMsg->sversion = htons(pMetaMsg->sversion);
- pMetaMsg->tversion = htons(pMetaMsg->tversion);
+static int32_t tableMetaMsgConvert(STableMetaMsg* pMetaMsg) {
+ pMetaMsg->tid = htonl(pMetaMsg->tid);
+ pMetaMsg->sversion = htons(pMetaMsg->sversion);
+ pMetaMsg->tversion = htons(pMetaMsg->tversion);
pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId);
- pMetaMsg->uid = htobe64(pMetaMsg->uid);
- pMetaMsg->suid = pMetaMsg->suid;
- pMetaMsg->contLen = htons(pMetaMsg->contLen);
+
+ pMetaMsg->uid = htobe64(pMetaMsg->uid);
pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);
-
+
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
(pMetaMsg->tid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
@@ -1969,174 +2158,365 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pSchema++;
}
-
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+// update the vgroupInfo if needed
+static void doUpdateVgroupInfo(int32_t vgId, SVgroupMsg *pVgroupMsg) {
+ assert(vgId > 0);
+
+ SNewVgroupInfo vgroupInfo = {.inUse = -1};
+ taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo);
+
+  // overwrite when nothing is cached yet, or when the cached info differs from the incoming msg
+ if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, pVgroupMsg)) || (vgroupInfo.inUse < 0)) {
+ vgroupInfo = createNewVgroupInfo(pVgroupMsg);
+ taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
+ tscDebug("add/update new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
+ }
+}
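
doUpdateVgroupInfo() is a read-compare-put cache: it clones the current entry and overwrites only when nothing is cached yet or the incoming copy differs (the real comparison is vgroupInfoIdentical()). The same decision in isolation, with an illustrative entry type:

```c
#include <stdbool.h>
#include <string.h>

typedef struct ExampleVgEntry {
  int inUse;     /* < 0 means "not cached yet" */
  int numOfEps;
} ExampleVgEntry;

static bool maybeUpdateEntry(ExampleVgEntry *cached, const ExampleVgEntry *incoming) {
  /* memcmp stands in for the field-by-field vgroupInfoIdentical() */
  if (cached->inUse >= 0 && memcmp(cached, incoming, sizeof(*cached)) == 0) {
    return false;        /* identical: keep the cached copy untouched */
  }
  *cached = *incoming;   /* missing or stale: overwrite */
  return true;
}
```
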
+
+static void doAddTableMetaToLocalBuf(STableMeta* pTableMeta, STableMetaMsg* pMetaMsg, bool updateSTable) {
+ if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
+ // add or update the corresponding super table meta data info
+ int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
+
+    // create the super table meta from the msg and add it to (or update it in) the hash map
+ if (updateSTable) {
+ STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
+ uint32_t size = tscGetTableMetaSize(pSupTableMeta);
+ int32_t code = taosHashPut(tscTableMetaMap, pTableMeta->sTableName, len, pSupTableMeta, size);
+ assert(code == TSDB_CODE_SUCCESS);
+
+ tfree(pSupTableMeta);
+ }
+
+ CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
+ taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), cMeta, sizeof(CChildTableMeta));
+ tfree(cMeta);
+ } else {
+ uint32_t s = tscGetTableMetaSize(pTableMeta);
+ taosHashPut(tscTableMetaMap, pMetaMsg->tableFname, strlen(pMetaMsg->tableFname), pTableMeta, s);
+ }
+}
+
+int tscProcessTableMetaRsp(SSqlObj *pSql) {
+ STableMetaMsg *pMetaMsg = (STableMetaMsg *)pSql->res.pRsp;
+ int32_t code = tableMetaMsgConvert(pMetaMsg);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
+ if (pTableMeta == NULL){
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
+ tfree(pTableMeta);
return TSDB_CODE_TSC_INVALID_VALUE;
}
- assert(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE || pTableMeta->tableType == TSDB_NORMAL_TABLE || pTableMeta->tableType == TSDB_STREAM_TABLE);
+ char name[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, name);
+ assert(strncmp(pMetaMsg->tableFname, name, tListLen(pMetaMsg->tableFname)) == 0);
- if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
- // check if super table hashmap or not
- int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
+ doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, true);
+ if (pTableMeta->tableType != TSDB_SUPER_TABLE) {
+ doUpdateVgroupInfo(pTableMeta->vgId, &pMetaMsg->vgroup);
+ }
- // super tableMeta data alreay exists, create it according to tableMeta and add it to hash map
- STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
+ tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
+ pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
+ pTableMeta->tableInfo.numOfTags);
- uint32_t size = tscGetTableMetaSize(pSupTableMeta);
- int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size);
- assert(code == TSDB_CODE_SUCCESS);
+ free(pTableMeta);
+ return TSDB_CODE_SUCCESS;
+}
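
tableMetaMsgConvert() above decodes the response in place with htonl()/htons(); this works because a fixed-width byte swap is its own inverse, so the host-to-network calls double as ntohl()/ntohs() on the receive path. Illustrative struct only:

```c
#include <arpa/inet.h>
#include <stdint.h>

typedef struct ExampleMetaMsg {
  int32_t tid;
  int16_t sversion;
} ExampleMetaMsg;

static void decodeExampleMetaMsg(ExampleMetaMsg *m) {
  m->tid      = htonl(m->tid);      /* byte swap is an involution...  */
  m->sversion = htons(m->sversion); /* ...so htonl/htons also decode  */
}
```
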
- tfree(pSupTableMeta);
+static SArray* createVgroupIdListFromMsg(char* pMsg, SHashObj* pSet, char* name, int32_t* size, uint64_t id) {
+ SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
- CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
+ pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
+ *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg));
- char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, name);
+ SArray* vgroupIdList = taosArrayInit(pVgroupMsg->numOfVgroups, sizeof(int32_t));
- taosHashPut(tscTableMetaInfo, name, strlen(name), cMeta, sizeof(CChildTableMeta));
- tfree(cMeta);
+ if (pVgroupMsg->numOfVgroups <= 0) {
+ tscDebug("0x%" PRIx64 " empty vgroup id list, no corresponding tables for stable:%s", id, name);
} else {
- uint32_t s = tscGetTableMetaSize(pTableMeta);
+ // just init, no need to lock
+ for (int32_t j = 0; j < pVgroupMsg->numOfVgroups; ++j) {
+ SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
+ vmsg->vgId = htonl(vmsg->vgId);
+ for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
+ vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
+ }
- char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, name);
+ taosArrayPush(vgroupIdList, &vmsg->vgId);
- taosHashPut(tscTableMetaInfo, name, strlen(name), pTableMeta, s);
+ if (taosHashGet(pSet, &vmsg->vgId, sizeof(vmsg->vgId)) == NULL) {
+ taosHashPut(pSet, &vmsg->vgId, sizeof(vmsg->vgId), "", 0);
+ doUpdateVgroupInfo(vmsg->vgId, vmsg);
+ }
+ }
}
- // update the vgroupInfo if needed
- if (pTableMeta->vgId > 0) {
- int32_t vgId = pTableMeta->vgId;
- assert(pTableMeta->tableType != TSDB_SUPER_TABLE);
+ return vgroupIdList;
+}
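+// Message-layout sketch (as parsed above): the payload is an SVgroupsMsg header
+// followed by numOfVgroups SVgroupMsg entries, integers in network byte order,
+// so the consumed size reported through *size is
+//   sizeof(SVgroupsMsg) + numOfVgroups * sizeof(SVgroupMsg)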
+
+static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t id) {
+ SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)pMsg;
+ pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
+
+ *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg));
- SNewVgroupInfo vgroupInfo = {.inUse = -1};
- taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
+ size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
+ SVgroupsInfo *pVgroupInfo = calloc(1, vgroupsz);
+ assert(pVgroupInfo != NULL);
- if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, &pMetaMsg->vgroup)) ||
- (vgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
- vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup);
- taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
- tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
+ pVgroupInfo->numOfVgroups = pVgroupMsg->numOfVgroups;
+ if (pVgroupInfo->numOfVgroups <= 0) {
+ tscDebug("0x%" PRIx64 " empty vgroup info, no corresponding tables for stable", id);
+ } else {
+ for (int32_t j = 0; j < pVgroupInfo->numOfVgroups; ++j) {
+ // just init, no need to lock
+ SVgroupInfo *pVgroup = &pVgroupInfo->vgroups[j];
+
+ SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
+ vmsg->vgId = htonl(vmsg->vgId);
+ for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
+ vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
+ }
+
+ pVgroup->numOfEps = vmsg->numOfEps;
+ pVgroup->vgId = vmsg->vgId;
+ for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
+ pVgroup->epAddr[k].port = vmsg->epAddr[k].port;
+ pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
+ }
+
+ doUpdateVgroupInfo(pVgroup->vgId, vmsg);
}
}
- tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
- pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
- pTableMeta->tableInfo.numOfTags);
+ return pVgroupInfo;
+}
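+// Caller sketch (mirrors the loop in tscProcessSTableVgroupRsp below): the
+// out-parameter `size` lets the caller step through a packed response:
+//   int32_t size = 0;
+//   pInfo->vgroupList = createVgroupInfoFromMsg(pMsg, &size, pSql->self);
+//   pMsg += size;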
- free(pTableMeta);
+int tscProcessRetrieveFuncRsp(SSqlObj* pSql) {
+ SSqlCmd* pCmd = &pSql->cmd;
+ SUdfFuncMsg* pFuncMsg = (SUdfFuncMsg *)pSql->res.pRsp;
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+
+ pFuncMsg->num = htonl(pFuncMsg->num);
+ assert(pFuncMsg->num == taosArrayGetSize(pQueryInfo->pUdfInfo));
+
+ char* pMsg = pFuncMsg->content;
+ for(int32_t i = 0; i < pFuncMsg->num; ++i) {
+ SFunctionInfoMsg* pFunc = (SFunctionInfoMsg*) pMsg;
+
+ for(int32_t j = 0; j < pFuncMsg->num; ++j) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, j);
+ if (strcmp(pUdfInfo->name, pFunc->name) != 0) {
+ continue;
+ }
+
+ if (pUdfInfo->content) {
+ continue;
+ }
+
+ pUdfInfo->resBytes = htons(pFunc->resBytes);
+ pUdfInfo->resType = pFunc->resType;
+ pUdfInfo->funcType = htonl(pFunc->funcType);
+ pUdfInfo->contLen = htonl(pFunc->len);
+ pUdfInfo->bufSize = htonl(pFunc->bufSize);
+
+ pUdfInfo->content = malloc(pUdfInfo->contLen);
+ memcpy(pUdfInfo->content, pFunc->content, pUdfInfo->contLen);
+
+ pMsg += sizeof(SFunctionInfoMsg) + pUdfInfo->contLen;
+ }
+ }
+
+ // the master sqlObj is stored in param
+ SSqlObj* parent = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pSql->param);
+ if(parent == NULL) {
+ return pSql->res.code;
+ }
+
+ SQueryInfo* parQueryInfo = tscGetQueryInfo(&parent->cmd);
+
+ assert(parent->signature == parent && (int64_t)pSql->param == parent->self);
+ taosArrayDestroy(parQueryInfo->pUdfInfo);
+
+ parQueryInfo->pUdfInfo = pQueryInfo->pUdfInfo; // assigned to parent sql obj.
+ pQueryInfo->pUdfInfo = NULL;
+ taosReleaseRef(tscObjRef, parent->self);
return TSDB_CODE_SUCCESS;
}
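+// Payload-layout sketch (as consumed above): SUdfFuncMsg carries `num` records
+// in `content`, each an SFunctionInfoMsg immediately followed by `len` bytes of
+// the UDF body; resBytes, funcType, len, and bufSize arrive in network byte order.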
-/**
- * multi table meta rsp pkg format:
- * | STaosRsp | SMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2
- * |...... 1B 4B
- **/
-int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
-#if 0
+int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
char *rsp = pSql->res.pRsp;
- ieType = *rsp;
- if (ieType != TSDB_IE_TYPE_META) {
- tscError("invalid ie type:%d", ieType);
- pSql->res.code = TSDB_CODE_TSC_INVALID_IE;
- pSql->res.numOfTotal = 0;
- return TSDB_CODE_TSC_APP_ERROR;
+ SMultiTableMeta *pMultiMeta = (SMultiTableMeta *)rsp;
+ pMultiMeta->numOfTables = htonl(pMultiMeta->numOfTables);
+ pMultiMeta->numOfVgroup = htonl(pMultiMeta->numOfVgroup);
+ pMultiMeta->numOfUdf = htonl(pMultiMeta->numOfUdf);
+
+ rsp += sizeof(SMultiTableMeta);
+
+ SSqlObj* pParentSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pSql->param);
+ if(pParentSql == NULL) {
+ return pSql->res.code;
}
- rsp++;
+ SSqlCmd *pParentCmd = &pParentSql->cmd;
+ SHashObj *pSet = taosHashInit(pMultiMeta->numOfVgroup, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
- SMultiTableInfoMsg *pInfo = (SMultiTableInfoMsg *)rsp;
- totalNum = htonl(pInfo->numOfTables);
- rsp += sizeof(SMultiTableInfoMsg);
+ char* buf = NULL;
+ char* pMsg = pMultiMeta->meta;
- for (i = 0; i < totalNum; i++) {
- SMultiTableMeta *pMultiMeta = (SMultiTableMeta *)rsp;
- STableMeta * pMeta = pMultiMeta->metas;
+ // decompress the message payload
+ if (pMultiMeta->compressed) {
+ buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta));
+ int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1,
+ buf, pMultiMeta->rawLen - sizeof(SMultiTableMeta), ONE_STAGE_COMP, NULL, 0);
+ assert(len == pMultiMeta->rawLen - sizeof(SMultiTableMeta));
- pMeta->sid = htonl(pMeta->sid);
- pMeta->sversion = htons(pMeta->sversion);
- pMeta->vgId = htonl(pMeta->vgId);
- pMeta->uid = htobe64(pMeta->uid);
+ pMsg = buf;
+ }
- if (pMeta->sid <= 0 || pMeta->vgId < 0) {
- tscError("invalid meter vgId:%d, sid%d", pMeta->vgId, pMeta->sid);
- pSql->res.code = TSDB_CODE_TSC_INVALID_VALUE;
- pSql->res.numOfTotal = i;
- return TSDB_CODE_TSC_APP_ERROR;
+ for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
+ STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
+ int32_t code = tableMetaMsgConvert(pMetaMsg);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosHashCleanup(pSet);
+ taosReleaseRef(tscObjRef, pParentSql->self);
+
+ tfree(buf);
+ return code;
}
- // pMeta->numOfColumns = htons(pMeta->numOfColumns);
- //
- // if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) {
- // tscError("invalid tag value count:%d", pMeta->numOfTags);
- // pSql->res.code = TSDB_CODE_TSC_INVALID_VALUE;
- // pSql->res.numOfTotal = i;
- // return TSDB_CODE_TSC_APP_ERROR;
- // }
- //
- // if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) {
- // tscError("invalid numOfTags:%d", pMeta->numOfTags);
- // pSql->res.code = TSDB_CODE_TSC_INVALID_VALUE;
- // pSql->res.numOfTotal = i;
- // return TSDB_CODE_TSC_APP_ERROR;
- // }
- //
- // if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns < 0) {
- // tscError("invalid numOfColumns:%d", pMeta->numOfColumns);
- // pSql->res.code = TSDB_CODE_TSC_INVALID_VALUE;
- // pSql->res.numOfTotal = i;
- // return TSDB_CODE_TSC_APP_ERROR;
- // }
- //
- // for (int j = 0; j < TSDB_REPLICA_MAX_NUM; ++j) {
- // pMeta->vpeerDesc[j].vnode = htonl(pMeta->vpeerDesc[j].vnode);
- // }
- //
- // pMeta->rowSize = 0;
- // rsp += sizeof(SMultiTableMeta);
- // pSchema = (SSchema *)rsp;
- //
- // int32_t numOfTotalCols = pMeta->numOfColumns + pMeta->numOfTags;
- // for (int j = 0; j < numOfTotalCols; ++j) {
- // pSchema->bytes = htons(pSchema->bytes);
- // pSchema->colId = htons(pSchema->colId);
- //
- // // ignore the tags length
- // if (j < pMeta->numOfColumns) {
- // pMeta->rowSize += pSchema->bytes;
- // }
- // pSchema++;
- // }
- //
- // rsp += numOfTotalCols * sizeof(SSchema);
- //
- // int32_t tagLen = 0;
- // SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
- //
- // if (pMeta->tableType == TSDB_CHILD_TABLE) {
- // for (int32_t j = 0; j < pMeta->numOfTags; ++j) {
- // tagLen += pTagsSchema[j].bytes;
- // }
- // }
- //
- // rsp += tagLen;
- // int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
- // }
+ bool freeMeta = false;
+ STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
+ if (pTableMeta == NULL) {
+ taosHashCleanup(pSet);
+ taosReleaseRef(tscObjRef, pParentSql->self);
+ tfree(buf);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
+ tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, pMetaMsg->tableFname);
+ tfree(pTableMeta);
+ taosHashCleanup(pSet);
+ taosReleaseRef(tscObjRef, pParentSql->self);
+
+ tfree(buf);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ if (pMultiMeta->metaClone == 1 || pTableMeta->tableType == TSDB_SUPER_TABLE) {
+ STableMetaVgroupInfo p = {.pTableMeta = pTableMeta,};
+ size_t keyLen = strnlen(pMetaMsg->tableFname, TSDB_TABLE_FNAME_LEN);
+ void* t = taosHashGet(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen);
+ assert(t == NULL);
+
+ taosHashPut(pParentCmd->pTableMetaMap, pMetaMsg->tableFname, keyLen, &p, sizeof(STableMetaVgroupInfo));
+ } else {
+ freeMeta = true;
+ }
+
+ // for each super table, update the meta information only once
+ bool updateStableMeta = false;
+ if (pTableMeta->tableType == TSDB_CHILD_TABLE && taosHashGet(pSet, &pMetaMsg->suid, sizeof(pMetaMsg->suid)) == NULL) {
+ updateStableMeta = true;
+ taosHashPut(pSet, &pMetaMsg->suid, sizeof(pMetaMsg->suid), "", 0);
+ }
+
+ // create the tableMeta and add it to the local tableMeta map
+ doAddTableMetaToLocalBuf(pTableMeta, pMetaMsg, updateStableMeta);
+
+ // for each vgroup, only update the information once.
+ int64_t vgId = pMetaMsg->vgroup.vgId;
+ if (pTableMeta->tableType != TSDB_SUPER_TABLE && taosHashGet(pSet, &vgId, sizeof(vgId)) == NULL) {
+ doUpdateVgroupInfo((int32_t) vgId, &pMetaMsg->vgroup);
+ taosHashPut(pSet, &vgId, sizeof(vgId), "", 0);
+ }
+
+ pMsg += pMetaMsg->contLen;
+ if (freeMeta) {
+ tfree(pTableMeta);
+ }
}
-
+
+ for(int32_t i = 0; i < pMultiMeta->numOfVgroup; ++i) {
+ char fname[TSDB_TABLE_FNAME_LEN] = {0};
+ tstrncpy(fname, pMsg, TSDB_TABLE_FNAME_LEN);
+ size_t len = strnlen(fname, TSDB_TABLE_FNAME_LEN);
+
+ pMsg += TSDB_TABLE_FNAME_LEN;
+
+ STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, fname, len);
+ assert(p != NULL);
+
+ int32_t size = 0;
+ if (p->vgroupIdList != NULL) {
+ taosArrayDestroy(p->vgroupIdList);
+ }
+
+ p->vgroupIdList = createVgroupIdListFromMsg(pMsg, pSet, fname, &size, pSql->self);
+
+ int32_t numOfVgId = (int32_t) taosArrayGetSize(p->vgroupIdList);
+ int32_t s = sizeof(tFilePage) + numOfVgId * sizeof(int32_t);
+
+ tFilePage* idList = calloc(1, s);
+ idList->num = numOfVgId;
+ memcpy(idList->data, TARRAY_GET_START(p->vgroupIdList), numOfVgId * sizeof(int32_t));
+
+ void* idListInst = taosCachePut(tscVgroupListBuf, fname, len, idList, s, 5000);
+ taosCacheRelease(tscVgroupListBuf, (void*) &idListInst, false);
+
+ tfree(idList);
+ pMsg += size;
+ }
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pParentCmd);
+ if (pMultiMeta->numOfUdf > 0) {
+ assert(pQueryInfo->pUdfInfo != NULL);
+ }
+
+ for(int32_t i = 0; i < pMultiMeta->numOfUdf; ++i) {
+ SFunctionInfoMsg* pFunc = (SFunctionInfoMsg*) pMsg;
+
+ for(int32_t j = 0; j < pMultiMeta->numOfUdf; ++j) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, j);
+ if (strcmp(pUdfInfo->name, pFunc->name) != 0) {
+ continue;
+ }
+
+ if (pUdfInfo->content) {
+ continue;
+ }
+
+ pUdfInfo->resBytes = htons(pFunc->resBytes);
+ pUdfInfo->resType = pFunc->resType;
+ pUdfInfo->funcType = htonl(pFunc->funcType);
+ pUdfInfo->contLen = htonl(pFunc->len);
+ pUdfInfo->bufSize = htonl(pFunc->bufSize);
+
+ pUdfInfo->content = malloc(pUdfInfo->contLen);
+ memcpy(pUdfInfo->content, pFunc->content, pUdfInfo->contLen);
+
+ pMsg += sizeof(SFunctionInfoMsg) + pUdfInfo->contLen;
+ }
+ }
+
pSql->res.code = TSDB_CODE_SUCCESS;
- pSql->res.numOfTotal = i;
- tscDebug("0x%"PRIx64" load multi-metermeta resp from complete num:%d", pSql->self, pSql->res.numOfTotal);
-#endif
-
+ pSql->res.numOfTotal = pMultiMeta->numOfTables;
+ tscDebug("0x%"PRIx64" load multi-tableMeta from mnode, numOfTables:%d", pSql->self, pMultiMeta->numOfTables);
+
+ taosHashCleanup(pSet);
+ taosReleaseRef(tscObjRef, pParentSql->self);
+
+ tfree(buf);
return TSDB_CODE_SUCCESS;
}
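+// Read-back sketch (names as in the function above): once the response is
+// processed, each requested table resolves through the parent command's map:
+//   STableMetaVgroupInfo* p = taosHashGet(pParentCmd->pTableMetaMap, fname, len);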
@@ -2148,68 +2528,45 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
}
assert(parent->signature == parent && (int64_t)pSql->param == parent->self);
-
+
SSqlRes* pRes = &pSql->res;
-
+
// NOTE: the order of several table must be preserved.
SSTableVgroupRspMsg *pStableVgroup = (SSTableVgroupRspMsg *)pRes->pRsp;
pStableVgroup->numOfTables = htonl(pStableVgroup->numOfTables);
char *pMsg = pRes->pRsp + sizeof(SSTableVgroupRspMsg);
-
- SSqlCmd* pCmd = &parent->cmd;
- for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) {
- STableMetaInfo *pInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
-
- SVgroupsMsg * pVgroupMsg = (SVgroupsMsg *) pMsg;
- pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
-
- size_t size = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg);
- size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
- pInfo->vgroupList = calloc(1, vgroupsz);
- assert(pInfo->vgroupList != NULL);
-
- pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
- if (pInfo->vgroupList->numOfVgroups <= 0) {
- tscDebug("0x%"PRIx64" empty vgroup info", pSql->self);
- } else {
- for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
- // just init, no need to lock
- SVgroupInfo *pVgroup = &pInfo->vgroupList->vgroups[j];
-
- SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
- vmsg->vgId = htonl(vmsg->vgId);
- vmsg->numOfEps = vmsg->numOfEps;
- for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
- vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port);
- }
+ SSqlCmd* pCmd = &parent->cmd;
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
- SNewVgroupInfo newVi = createNewVgroupInfo(vmsg);
- pVgroup->numOfEps = newVi.numOfEps;
- pVgroup->vgId = newVi.vgId;
- for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
- pVgroup->epAddr[k].port = newVi.ep[k].port;
- pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN);
- }
+ char fName[TSDB_TABLE_FNAME_LEN] = {0};
+ for(int32_t i = 0; i < pStableVgroup->numOfTables; ++i) {
+ char* name = pMsg;
+ pMsg += TSDB_TABLE_FNAME_LEN;
- // check if current buffer contains the vgroup info.
- // If not, add it
- SNewVgroupInfo existVgroupInfo = {.inUse = -1};
- taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo));
+ STableMetaInfo *pInfo = NULL;
+ for(int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
+ STableMetaInfo *pInfo1 = tscGetTableMetaInfoFromCmd(pCmd, j);
+ memset(fName, 0, tListLen(fName));
- if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) ||
- (existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it
- taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi));
- tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", newVi.vgId, (int32_t) taosHashGetSize(tscVgroupMap));
- }
+ tNameExtractFullName(&pInfo1->name, fName);
+ if (strcmp(name, fName) != 0) {
+ continue;
}
+
+ pInfo = pInfo1;
+ break;
}
+ if (!pInfo) {
+ continue;
+ }
+ int32_t size = 0;
+ pInfo->vgroupList = createVgroupInfoFromMsg(pMsg, &size, pSql->self);
pMsg += size;
}
taosReleaseRef(tscObjRef, parent->self);
-
return pSql->res.code;
}
@@ -2221,7 +2578,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -2253,16 +2610,17 @@ int tscProcessShowRsp(SSqlObj *pSql) {
SColumnIndex index = {0};
pSchema = pMetaMsg->schema;
-
+
+ uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
for (int16_t i = 0; i < pMetaMsg->numOfColumns; ++i, ++pSchema) {
index.columnIndex = i;
- tscColumnListInsert(pQueryInfo->colList, &index);
+ tscColumnListInsert(pQueryInfo->colList, i, uid, pSchema);
TAOS_FIELD f = tscCreateField(pSchema->type, pSchema->name, pSchema->bytes);
SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f);
- pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index,
- pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false);
+ pInfo->pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index,
+ pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pCmd), pTableSchema[i].bytes, false);
}
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
@@ -2280,7 +2638,7 @@ static void createHbObj(STscObj* pObj) {
pSql->fp = tscProcessHeartBeatRsp;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfoS(&pSql->cmd);
if (pQueryInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
tfree(pSql);
@@ -2346,7 +2704,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
pthread_mutex_lock(&pObj->mutex);
int ret = tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
@@ -2355,30 +2713,26 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
return ret;
}
+// todo: only invalidate the buffered data that belongs to the dropped databases
int tscProcessDropDbRsp(SSqlObj *pSql) {
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaMap);
+ taosHashClear(tscVgroupMap);
+ taosCacheEmpty(tscVgroupListBuf);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
-
- //The cached tableMeta is expired in this case, so clean it in hash table
- char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, name);
-
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
- tscDebug("0x%"PRIx64" remove table meta after drop table:%s, numOfRemain:%d", pSql->self, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
-
- pTableMetaInfo->pTableMeta = NULL;
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
+ tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
+ tfree(pTableMetaInfo->pTableMeta);
return 0;
}
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
@@ -2386,11 +2740,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
tscDebug("0x%"PRIx64" remove tableMeta in hashMap after alter-table: %s", pSql->self, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
- taosHashEmpty(tscTableMetaInfo);
+ taosHashClear(tscTableMetaMap);
}
return 0;
@@ -2400,6 +2754,10 @@ int tscProcessAlterDbMsgRsp(SSqlObj *pSql) {
UNUSED(pSql);
return 0;
}
+int tscProcessCompactRsp(SSqlObj *pSql) {
+ UNUSED(pSql);
+ return TSDB_CODE_SUCCESS;
+}
int tscProcessShowCreateRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, 1);
@@ -2408,11 +2766,12 @@ int tscProcessShowCreateRsp(SSqlObj *pSql) {
int tscProcessQueryRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
- SQueryTableRsp *pQuery = (SQueryTableRsp *)pRes->pRsp;
- pQuery->qId = htobe64(pQuery->qId);
- pRes->qId = pQuery->qId;
+ SQueryTableRsp *pQueryAttr = (SQueryTableRsp *)pRes->pRsp;
+ pQueryAttr->qId = htobe64(pQueryAttr->qId);
+ pRes->qId = pQueryAttr->qId;
pRes->data = NULL;
+
tscResetForNextRetrieve(pRes);
tscDebug("0x%"PRIx64" query rsp received, qId:0x%"PRIx64, pSql->self, pRes->qId);
return 0;
@@ -2437,18 +2796,19 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
pRes->completed = (pRetrieve->completed == 1);
pRes->data = pRetrieve->data;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
return pRes->code;
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- if (pCmd->command == TSDB_SQL_RETRIEVE) {
- tscSetResRawPtr(pRes, pQueryInfo);
- } else if ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) {
- tscSetResRawPtr(pRes, pQueryInfo);
- } else if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
- tscSetResRawPtr(pRes, pQueryInfo);
+ if ((pCmd->command == TSDB_SQL_RETRIEVE) ||
+ ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) ||
+ (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE))) {
+ tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted);
}
if (pSql->pSubscription != NULL) {
@@ -2480,54 +2840,61 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
-static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
+static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool autocreate) {
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
if (NULL == pNew) {
tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- pNew->pTscObj = pSql->pTscObj;
- pNew->signature = pNew;
+ pNew->pTscObj = pSql->pTscObj;
+ pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_META;
- tscAddSubqueryInfo(&pNew->cmd);
+ tscAddQueryInfo(&pNew->cmd);
- SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
-
- pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
+
tscFreeSqlObj(pNew);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
- assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
+ STableMetaInfo *pNewTableMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
+ assert(pNewQueryInfo->numOfTables == 1);
- tNameAssign(&pNewMeterMetaInfo->name, &pTableMetaInfo->name);
-
- if (pSql->cmd.autoCreated) {
- int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
- if (code != TSDB_CODE_SUCCESS) {
- tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self);
- tscFreeSqlObj(pNew);
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
- }
+ tNameAssign(&pNewTableMetaInfo->name, &pTableMetaInfo->name);
registerSqlObj(pNew);
- tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get tableMeta, auto create:%d", pSql->self, pNew->self,
- pNew->cmd.autoCreated);
- pNew->fp = tscTableMetaCallBack;
+ pNew->fp = tscTableMetaCallBack;
pNew->param = (void *)pSql->self;
- tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self);
-
+ tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get tableMeta, auto create:%d, metaRid from %"PRId64" to %"PRId64,
+ pSql->self, pNew->self, autocreate, pSql->metaRid, pNew->self);
pSql->metaRid = pNew->self;
- int32_t code = tscProcessSql(pNew);
+ {
+ STableInfoMsg *pInfoMsg = (STableInfoMsg *)pNew->cmd.payload;
+ int32_t code = tNameExtractFullName(&pNewTableMetaInfo->name, pInfoMsg->tableFname);
+ if (code != TSDB_CODE_SUCCESS) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ pInfoMsg->createFlag = htons(autocreate ? 1 : 0);
+ char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
+
+ // tag data exists
+ if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) {
+ pMsg = serializeTagData(&pSql->cmd.insertParam.tagData, pMsg);
+ }
+
+ pNew->cmd.payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
+ pNew->cmd.msgType = TSDB_MSG_TYPE_CM_TABLE_META;
+ }
+
+ int32_t code = tscBuildAndSendRequest(pNew, NULL);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
}
@@ -2535,92 +2902,245 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
return code;
}
-int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
- assert(tIsValidName(&pTableMetaInfo->name));
+int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVgroupNameList, SArray* pUdfList, __async_cb_func_t fp, bool metaClone) {
+ SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
+ if (NULL == pNew) {
+ tscError("0x%"PRIx64" failed to allocate sqlobj to get multiple table meta", pSql->self);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
- uint32_t size = tscGetTableMetaMaxSize();
- if (pTableMetaInfo->pTableMeta == NULL) {
- pTableMetaInfo->pTableMeta = calloc(1, size);
- pTableMetaInfo->tableMetaSize = size;
- } else if (pTableMetaInfo->tableMetaSize < size) {
- char *tmp = realloc(pTableMetaInfo->pTableMeta, size);
- if (tmp == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ pNew->pTscObj = pSql->pTscObj;
+ pNew->signature = pNew;
+ pNew->cmd.command = TSDB_SQL_MULTI_META;
+
+ int32_t numOfTable = (int32_t) taosArrayGetSize(pNameList);
+ int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pVgroupNameList);
+ int32_t numOfUdf = pUdfList ? (int32_t)taosArrayGetSize(pUdfList) : 0;
+
+ int32_t size = (numOfTable + numOfVgroupList) * TSDB_TABLE_FNAME_LEN + TSDB_FUNC_NAME_LEN * numOfUdf + sizeof(SMultiTableInfoMsg);
+ if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, size)) {
+ tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
+ tscFreeSqlObj(pNew);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ SMultiTableInfoMsg* pInfo = (SMultiTableInfoMsg*) pNew->cmd.payload;
+ pInfo->metaClone = metaClone ? 1 : 0;
+ pInfo->numOfTables = htonl((uint32_t) taosArrayGetSize(pNameList));
+ pInfo->numOfVgroups = htonl((uint32_t) taosArrayGetSize(pVgroupNameList));
+ pInfo->numOfUdfs = htonl(numOfUdf);
+
+ char* start = pInfo->tableNames;
+ int32_t len = 0;
+ for(int32_t i = 0; i < numOfTable; ++i) {
+ char* name = taosArrayGetP(pNameList, i);
+ if (i < numOfTable - 1 || numOfVgroupList > 0 || numOfUdf > 0) {
+ len = sprintf(start, "%s,", name);
+ } else {
+ len = sprintf(start, "%s", name);
+ }
+
+ start += len;
+ }
+
+ for(int32_t i = 0; i < numOfVgroupList; ++i) {
+ char* name = taosArrayGetP(pVgroupNameList, i);
+ if (i < numOfVgroupList - 1 || numOfUdf > 0) {
+ len = sprintf(start, "%s,", name);
+ } else {
+ len = sprintf(start, "%s", name);
+ }
+
+ start += len;
+ }
+
+ for(int32_t i = 0; i < numOfUdf; ++i) {
+ SUdfInfo * u = taosArrayGet(pUdfList, i);
+ if (i < numOfUdf - 1) {
+ len = sprintf(start, "%s,", u->name);
+ } else {
+ len = sprintf(start, "%s", u->name);
}
- pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
+
+ start += len;
+ }
+
+ pNew->cmd.payloadLen = (int32_t) ((start - pInfo->tableNames) + sizeof(SMultiTableInfoMsg));
+ pNew->cmd.msgType = TSDB_MSG_TYPE_CM_TABLES_META;
+
+ registerSqlObj(pNew);
+ tscDebug("0x%"PRIx64" new pSqlObj:0x%"PRIx64" to get %d tableMeta, vgroupInfo:%d, udf:%d, msg size:%d", pSql->self,
+ pNew->self, numOfTable, numOfVgroupList, numOfUdf, pNew->cmd.payloadLen);
+
+ pNew->fp = fp;
+ pNew->param = (void *)pSql->rootObj->self;
+
+ tscDebug("0x%"PRIx64" metaRid from 0x%" PRIx64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self);
+
+ pSql->metaRid = pNew->self;
+ int32_t code = tscBuildAndSendRequest(pNew, NULL);
+ if (code == TSDB_CODE_SUCCESS) {
+ code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
}
- memset(pTableMetaInfo->pTableMeta, 0, size);
- pTableMetaInfo->tableMetaSize = size;
+ return code;
+}
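+// Payload sketch (as serialized above): table names, then vgroup names, then udf
+// names, comma-separated in one flat string after the SMultiTableInfoMsg header:
+//   "db1.tb1,db1.tb2,db1.stb1,udf1"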
- pTableMetaInfo->pTableMeta->tableType = -1;
- pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
+int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool autocreate, bool onlyLocal) {
+ assert(tIsValidName(&pTableMetaInfo->name));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
- taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
-
- // TODO resize the tableMeta
- char buf[80*1024] = {0};
- assert(size < 80*1024);
+ if (pTableMetaInfo->tableMetaCapacity != 0) {
+ if (pTableMetaInfo->pTableMeta != NULL) {
+ memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
+ }
+ }
+ if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity)) {
+ tfree(pTableMetaInfo->pTableMeta);
+ pTableMetaInfo->tableMetaCapacity = 0;
+ }
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
- if (pMeta->id.uid > 0) {
+ if (pMeta && pMeta->id.uid > 0) {
// in case of a child table, build the full table meta from the cached super table meta
if (pMeta->tableType == TSDB_CHILD_TABLE) {
- int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf);
+ int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity);
+ pMeta = pTableMetaInfo->pTableMeta;
if (code != TSDB_CODE_SUCCESS) {
- return getTableMetaFromMnode(pSql, pTableMetaInfo);
+ return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
}
+ tscDebug("0x%"PRIx64 " %s retrieve tableMeta from cache, numOfCols:%d, numOfTags:%d", pSql->self, name, pMeta->tableInfo.numOfColumns, pMeta->tableInfo.numOfTags);
return TSDB_CODE_SUCCESS;
}
- return getTableMetaFromMnode(pSql, pTableMetaInfo);
+ if (onlyLocal) {
+ return TSDB_CODE_TSC_NO_META_CACHED;
+ }
+
+ return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
+}
+
+int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
+ return tscGetTableMetaImpl(pSql, pTableMetaInfo, false, false);
}
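+// Flag sketch for tscGetTableMetaImpl: onlyLocal=true makes this a pure cache
+// probe that returns TSDB_CODE_TSC_NO_META_CACHED on a miss, while
+// autocreate=true asks the mnode to create the table if it does not exist:
+//   tscGetTableMetaImpl(pSql, pTableMetaInfo, false /*autocreate*/, true /*onlyLocal*/);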
-int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists) {
- pSql->cmd.autoCreated = createIfNotExists;
- return tscGetTableMeta(pSql, pTableMetaInfo);
+int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists, bool onlyLocal) {
+ return tscGetTableMetaImpl(pSql, pTableMetaInfo, createIfNotExists, onlyLocal);
+}
+
+int32_t tscGetUdfFromNode(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
+ SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
+ if (NULL == pNew) {
+ tscError("%p malloc failed for new sqlobj to get user-defined functions", pSql);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ pNew->pTscObj = pSql->pTscObj;
+ pNew->signature = pNew;
+ pNew->cmd.command = TSDB_SQL_RETRIEVE_FUNC;
+
+ if (tscAddQueryInfo(&pNew->cmd) != TSDB_CODE_SUCCESS) {
+ tscError("%p malloc failed for new queryinfo", pSql);
+ tscFreeSqlObj(pNew);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd);
+
+ pNewQueryInfo->pUdfInfo = taosArrayInit(4, sizeof(SUdfInfo));
+ for(int32_t i = 0; i < taosArrayGetSize(pQueryInfo->pUdfInfo); ++i) {
+ SUdfInfo info = {0};
+ SUdfInfo* p1 = taosArrayGet(pQueryInfo->pUdfInfo, i);
+ info = *p1;
+ info.name = strdup(p1->name);
+ taosArrayPush(pNewQueryInfo->pUdfInfo, &info);
+ }
+
+ pNew->cmd.active = pNewQueryInfo;
+
+ if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
+ tscError("%p malloc failed for payload to get table meta", pSql);
+ tscFreeSqlObj(pNew);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ tscDebug("%p new pSqlObj:%p to retrieve udf", pSql, pNew);
+ registerSqlObj(pNew);
+
+ pNew->fp = tscTableMetaCallBack;
+ pNew->param = (void *)pSql->self;
+
+ tscDebug("%p metaRid from %" PRId64 " to %" PRId64 , pSql, pSql->metaRid, pNew->self);
+
+ pSql->metaRid = pNew->self;
+
+ int32_t code = tscBuildAndSendRequest(pNew, NULL);
+ if (code == TSDB_CODE_SUCCESS) {
+ code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
+ }
+
+ return code;
+}
+
+static void freeElem(void* p) {
+ tfree(*(char**)p);
}
/**
- * retrieve table meta from mnode, and update the local table meta hashmap.
+ * retrieve table meta from mnode, and then update the local table meta hashmap.
* @param pSql sql object
* @param tableIndex table index
* @return status code
*/
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
- SSqlCmd *pCmd = &pSql->cmd;
+ SSqlCmd* pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
char name[TSDB_TABLE_FNAME_LEN] = {0};
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
if (code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" failed to generate the table full name", pSql->self);
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
- tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql->self, name,
+ tscDebug("0x%"PRIx64" update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRIu64, pSql->self, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
- size_t len = strlen(name);
- taosHashRemove(tscTableMetaInfo, name, len);
+ tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
+
+ pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
+ pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+
+ SSqlCmd* pCmd2 = &pSql->rootObj->cmd;
+ pCmd2->pTableMetaMap = tscCleanupTableMetaMap(pCmd2->pTableMetaMap);
+ pCmd2->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+
+ pSql->rootObj->retryReason = pSql->retryReason;
+
+ SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
+ SArray* vgroupList = taosArrayInit(1, POINTER_BYTES);
+
+ char* n = strdup(name);
+ taosArrayPush(pNameList, &n);
+ code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL, tscTableMetaCallBack, true);
+ taosArrayDestroyEx(pNameList, freeElem);
+ taosArrayDestroyEx(vgroupList, freeElem);
- return getTableMetaFromMnode(pSql, pTableMetaInfo);
+ return code;
}
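+// Flow sketch (as implemented above): renewal removes the stale entry, resets
+// both per-command tableMeta maps, and reissues a single-name request:
+//   code = getMultiTableMetaFromMnode(pSql, pNameList, vgroupList, NULL,
+//                                     tscTableMetaCallBack, true /* metaClone */);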
-static bool allVgroupInfoRetrieved(SSqlCmd* pCmd, int32_t clauseIndex) {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
+static bool allVgroupInfoRetrieved(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
if (pTableMetaInfo->vgroupList == NULL) {
@@ -2632,14 +3152,11 @@ static bool allVgroupInfoRetrieved(SSqlCmd* pCmd, int32_t clauseIndex) {
return true;
}
-int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
- int code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
- SSqlCmd *pCmd = &pSql->cmd;
-
- if (allVgroupInfoRetrieved(pCmd, clauseIndex)) {
+int tscGetSTableVgroupInfo(SSqlObj *pSql, SQueryInfo* pQueryInfo) {
+ int32_t code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ if (allVgroupInfoRetrieved(pQueryInfo)) {
return TSDB_CODE_SUCCESS;
}
-
SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
@@ -2647,13 +3164,12 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
pNew->cmd.command = TSDB_SQL_STABLEVGROUP;
// TODO TEST IT
- SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd);
if (pNewQueryInfo == NULL) {
tscFreeSqlObj(pNew);
return code;
}
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
+
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
@@ -2671,13 +3187,11 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
tscDebug("0x%"PRIx64" svgroupRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->svgroupRid, pNew->self);
pSql->svgroupRid = pNew->self;
-
-
tscDebug("0x%"PRIx64" new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql->self, pNew, pNewQueryInfo->numOfTables);
pNew->fp = tscTableMetaCallBack;
pNew->param = (void *)pSql->self;
- code = tscProcessSql(pNew);
+ code = tscBuildAndSendRequest(pNew, NULL);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
@@ -2692,6 +3206,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_CREATE_DB] = tscBuildCreateDbMsg;
tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildUserMsg;
+ tscBuildMsg[TSDB_SQL_CREATE_FUNCTION] = tscBuildCreateFuncMsg;
tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
@@ -2700,6 +3215,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
+ tscBuildMsg[TSDB_SQL_DROP_FUNCTION] = tscBuildDropFuncMsg;
tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
@@ -2709,12 +3225,12 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_TABLE] = tscBuildAlterTableMsg;
tscBuildMsg[TSDB_SQL_UPDATE_TAGS_VAL] = tscBuildUpdateTagMsg;
tscBuildMsg[TSDB_SQL_ALTER_DB] = tscAlterDbMsg;
+ tscBuildMsg[TSDB_SQL_COMPACT_VNODE] = tscBuildCompactMsg;
tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg;
tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg;
- tscBuildMsg[TSDB_SQL_META] = tscBuildTableMetaMsg;
tscBuildMsg[TSDB_SQL_STABLEVGROUP] = tscBuildSTableVgroupMsg;
- tscBuildMsg[TSDB_SQL_MULTI_META] = tscBuildMultiMeterMetaMsg;
+ tscBuildMsg[TSDB_SQL_RETRIEVE_FUNC] = tscBuildRetrieveFuncMsg;
tscBuildMsg[TSDB_SQL_HB] = tscBuildHeartBeatMsg;
tscBuildMsg[TSDB_SQL_SHOW] = tscBuildShowMsg;
@@ -2732,7 +3248,8 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_USE_DB] = tscProcessUseDbRsp;
tscProcessMsgRsp[TSDB_SQL_META] = tscProcessTableMetaRsp;
tscProcessMsgRsp[TSDB_SQL_STABLEVGROUP] = tscProcessSTableVgroupRsp;
- tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiMeterMetaRsp;
+ tscProcessMsgRsp[TSDB_SQL_MULTI_META] = tscProcessMultiTableMetaRsp;
+ tscProcessMsgRsp[TSDB_SQL_RETRIEVE_FUNC] = tscProcessRetrieveFuncRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp;
tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromNode; // rsp handled by same function.
@@ -2746,17 +3263,20 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp;
- tscProcessMsgRsp[TSDB_SQL_RETRIEVE_LOCALMERGE] = tscProcessRetrieveLocalMergeRsp;
+ tscProcessMsgRsp[TSDB_SQL_RETRIEVE_GLOBALMERGE] = tscProcessRetrieveGlobalMergeRsp;
tscProcessMsgRsp[TSDB_SQL_ALTER_TABLE] = tscProcessAlterTableMsgRsp;
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
+ tscProcessMsgRsp[TSDB_SQL_COMPACT_VNODE] = tscProcessCompactRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
+ tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_STABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
-
+
tscKeepConn[TSDB_SQL_SHOW] = 1;
tscKeepConn[TSDB_SQL_RETRIEVE] = 1;
tscKeepConn[TSDB_SQL_SELECT] = 1;
tscKeepConn[TSDB_SQL_FETCH] = 1;
tscKeepConn[TSDB_SQL_HB] = 1;
}
+
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 02cd2bd5ef6ebc33752ec1eb46642bb65cd2626f..e292db30fd1983d78e3712238b2de6cd18638956 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -191,7 +191,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
pSql->fp = syncConnCallback;
pSql->param = pSql;
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
tsem_wait(&pSql->rspSem);
if (pSql->res.code != TSDB_CODE_SUCCESS) {
@@ -265,7 +265,7 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
if (taos) *taos = pObj;
pSql->fetchFp = fp;
- pSql->res.code = tscProcessSql(pSql);
+ pSql->res.code = tscBuildAndSendRequest(pSql, NULL);
tscDebug("%p DB async connection is opening", taos);
return pObj;
}
@@ -373,11 +373,15 @@ int taos_num_fields(TAOS_RES *res) {
if (pSql == NULL || pSql->signature != pSql) return 0;
int32_t num = 0;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (pQueryInfo == NULL) {
return num;
}
+ while(pQueryInfo->pDownstream != NULL) {
+ pQueryInfo = pQueryInfo->pDownstream;
+ }
+
size_t numOfCols = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfCols; ++i) {
SInternalField* pInfo = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
@@ -408,7 +412,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) return 0;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (pQueryInfo == NULL) {
return NULL;
}
@@ -452,11 +456,12 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {
return (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
- pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
+ pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
+ pCmd->command == TSDB_SQL_SHOW_CREATE_STABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
@@ -559,9 +564,9 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
return true;
}
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
- if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+ if ((pQueryInfo == NULL) || pQueryInfo->globalMerge) {
return true;
}
@@ -578,7 +583,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]);
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
return false;
}
@@ -613,7 +618,7 @@ int taos_errno(TAOS_RES *tres) {
* why the sql is invalid
*/
static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
- if (code != TSDB_CODE_TSC_INVALID_SQL
+ if (code != TSDB_CODE_TSC_INVALID_OPERATION
&& code != TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
return false;
}
@@ -622,7 +627,7 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL;
if (len > 0) {
- z = strstr(pCmd->payload, "invalid SQL");
+ z = strstr(pCmd->payload, "invalid operation");
if (z == NULL) {
z = strstr(pCmd->payload, "syntax error");
}
@@ -672,9 +677,9 @@ char *taos_get_client_info() { return version; }
static void tscKillSTableQuery(SSqlObj *pSql) {
SSqlCmd* pCmd = &pSql->cmd;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
- if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+ if (!pQueryInfo->globalMerge) {
return;
}
@@ -723,9 +728,9 @@ void taos_stop_query(TAOS_RES *res) {
// set the error code for master pSqlObj firstly
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
- if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+ if (pQueryInfo->globalMerge) {
assert(pSql->rpcRid <= 0);
tscKillSTableQuery(pSql);
} else {
@@ -753,7 +758,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
return true;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (pQueryInfo == NULL) {
return true;
}
@@ -828,9 +833,9 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
case TSDB_DATA_TYPE_NCHAR: {
int32_t charLen = varDataLen((char*)row[i] - VARSTR_HEADER_SIZE);
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
- assert(charLen <= fields[i].bytes);
+ assert(charLen <= fields[i].bytes && charLen >= 0);
} else {
- assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE);
+ assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0);
}
memcpy(str + len, row[i], charLen);
@@ -867,15 +872,12 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
- pSql->pTscObj = taos;
+ pSql->pTscObj = taos;
pSql->signature = pSql;
-
- SSqlRes *pRes = &pSql->res;
+ pSql->rootObj = pSql;
SSqlCmd *pCmd = &pSql->cmd;
- pRes->numOfTotal = 0;
- pRes->numOfClauseTotal = 0;
-
+ pCmd->resColumnId = TSDB_RES_COL_ID;
tscDebug("0x%"PRIx64" Valid SQL: %s pObj:%p", pSql->self, sql, pObj);
@@ -886,7 +888,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);
@@ -895,10 +899,10 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
strtolower(pSql->sqlstr, sql);
- pCmd->curSql = NULL;
- if (NULL != pCmd->pTableBlockHashList) {
- taosHashCleanup(pCmd->pTableBlockHashList);
- pCmd->pTableBlockHashList = NULL;
+// pCmd->curSql = NULL;
+ if (NULL != pCmd->insertParam.pTableBlockHashList) {
+ taosHashCleanup(pCmd->insertParam.pTableBlockHashList);
+ pCmd->insertParam.pTableBlockHashList = NULL;
}
pSql->fp = asyncCallback;
@@ -920,90 +924,19 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return code;
}
-static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
- // must before clean the sqlcmd object
- tscResetSqlCmd(&pSql->cmd, false);
-
- SSqlCmd *pCmd = &pSql->cmd;
-
- pCmd->command = TSDB_SQL_MULTI_META;
- pCmd->count = 0;
-
- int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
- char *str = (char *)tblNameList;
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
- if (pQueryInfo == NULL) {
- pSql->res.code = terrno;
- return terrno;
- }
-
- STableMetaInfo *pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
-
- if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- char *nextStr;
- char tblName[TSDB_TABLE_FNAME_LEN];
- int payloadLen = 0;
- char *pMsg = pCmd->payload;
- while (1) {
- nextStr = strchr(str, ',');
- if (nextStr == NULL) {
- break;
- }
-
- memcpy(tblName, str, nextStr - str);
- int32_t len = (int32_t)(nextStr - str);
- tblName[len] = '\0';
-
- str = nextStr + 1;
- len = (int32_t)strtrim(tblName);
-
- SStrToken sToken = {.n = len, .type = TK_ID, .z = tblName};
- tGetToken(tblName, &sToken.type);
-
- // Check if the table name available or not
- if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
- code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
- sprintf(pCmd->payload, "table name is invalid");
- return code;
- }
-
- if ((code = tscSetTableFullName(pTableMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (++pCmd->count > TSDB_MULTI_TABLEMETA_MAX_NUM) {
- code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
- sprintf(pCmd->payload, "tables over the max number");
- return code;
- }
-
- int32_t xlen = tNameLen(&pTableMetaInfo->name);
- if (payloadLen + xlen + 128 >= pCmd->allocSize) {
- char *pNewMem = realloc(pCmd->payload, pCmd->allocSize + tblListLen);
- if (pNewMem == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- sprintf(pCmd->payload, "failed to allocate memory");
- return code;
- }
-
- pCmd->payload = pNewMem;
- pCmd->allocSize = pCmd->allocSize + tblListLen;
- pMsg = pCmd->payload;
- }
-
- char n[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, n);
- payloadLen += sprintf(pMsg + payloadLen, "%s,", n);
+void loadMultiTableMetaCallback(void *param, TAOS_RES *res, int code) {
+ SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
+ if (pSql == NULL) {
+ return;
}
- *(pMsg + payloadLen) = '\0';
- pCmd->payloadLen = payloadLen + 1;
+ taosReleaseRef(tscObjRef, pSql->self);
+ pSql->res.code = code;
+ tsem_post(&pSql->rspSem);
+}
- return TSDB_CODE_SUCCESS;
+static void freeElem(void* p) {
+ tfree(*(char**)p);
}
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
@@ -1015,55 +948,71 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
return TSDB_CODE_TSC_DISCONNECTED;
}
- SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
- pSql->pTscObj = taos;
- pSql->signature = pSql;
-
- SSqlRes *pRes = &pSql->res;
-
- pRes->code = 0;
- pRes->numOfTotal = 0; // the number of getting table meta from server
- pRes->numOfClauseTotal = 0;
-
- assert(pSql->fp == NULL);
- tscDebug("0x%"PRIx64" tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj);
+ int32_t length = (int32_t)strlen(tableNameList);
+ if (length == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
- int32_t tblListLen = (int32_t)strlen(tableNameList);
- if (tblListLen > MAX_TABLE_NAME_LENGTH) {
- tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
- tscFreeSqlObj(pSql);
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (length > MAX_TABLE_NAME_LENGTH) {
+ tscError("tableNameList too long, length:%d, maximum allowed:%d", length, MAX_TABLE_NAME_LENGTH);
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- char *str = calloc(1, tblListLen + 1);
+ char *str = calloc(1, length + 1);
if (str == NULL) {
- tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
- tscFreeSqlObj(pSql);
+ tscError("failed to allocate sql string buffer, size:%d", length);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strtolower(str, tableNameList);
- int32_t code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen);
-
- /*
- * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query.
- * If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql()
- * to free connection, which may cause segment fault, when the parse phrase is not even successfully executed.
- */
- pRes->qId = 0;
+ SArray* plist = taosArrayInit(4, POINTER_BYTES);
+ if (plist == NULL) {
+ tfree(str);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ SArray* vgroupList = taosArrayInit(4, POINTER_BYTES);
+ if (vgroupList == NULL) {
+ taosArrayDestroy(plist);
+ tfree(str);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
+ tscAllocPayload(&pSql->cmd, 1024);
+
+ pSql->pTscObj = taos;
+ pSql->signature = pSql;
+ pSql->rootObj = pSql;
+
+ int32_t code = (uint8_t) tscTransferTableNameList(pSql, str, length, plist);
free(str);
if (code != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
+ taosArrayDestroyEx(plist, freeElem);
+ taosArrayDestroyEx(vgroupList, freeElem);
return code;
}
- tscDoQuery(pSql);
+ pSql->cmd.pTableMetaMap = taosHashInit(taosArrayGetSize(plist), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ registerSqlObj(pSql);
+ tscDebug("0x%"PRIx64" load multiple table meta, tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj);
+
+ code = getMultiTableMetaFromMnode(pSql, plist, vgroupList, NULL, loadMultiTableMetaCallback, false);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ code = TSDB_CODE_SUCCESS;
+ }
+
+ taosArrayDestroyEx(plist, freeElem);
+ taosArrayDestroyEx(vgroupList, freeElem);
- tscDebug("0x%"PRIx64" load multi table meta result:%d %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
- if ((code = pRes->code) != TSDB_CODE_SUCCESS) {
- tscFreeSqlObj(pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscFreeRegisteredSqlObj(pSql);
+ return code;
}
+ tsem_wait(&pSql->rspSem);
+ tscFreeRegisteredSqlObj(pSql);
return code;
}
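+// Usage sketch for this public API (assumes `taos` is a live connection): pass a
+// comma-separated list of table names; the call blocks until the metas are cached:
+//   int code = taos_load_table_info(taos, "db1.tb1,db1.tb2");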
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index de2f0087ce50603b100bf3c384be3dea13e05f2a..63459a5979ef4545a1e3c56048bebdf236ce1e9b 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -13,7 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include
#include "os.h"
#include "taosmsg.h"
#include "tscLog.h"
@@ -38,8 +37,8 @@ static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t l
static bool isProjectStream(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId != TSDB_FUNC_PRJ) {
+ SExprInfo *pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId != TSDB_FUNC_PRJ) {
return false;
}
}
@@ -54,9 +53,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
- if (prec == TSDB_TIME_PRECISION_MICRO) {
- slidingTime = slidingTime / 1000;
- }
+ slidingTime = convertTimePrecision(slidingTime, pStream->precision, TSDB_TIME_PRECISION_MILLI);
if (slidingTime < retryDelta) {
return slidingTime;
@@ -90,12 +87,12 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
return;
}
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == 0 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- code = tscGetSTableVgroupInfo(pSql, 0);
+ code = tscGetSTableVgroupInfo(pSql, pQueryInfo);
}
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
@@ -103,7 +100,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
}
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) {
- tscDebug("%p empty vgroup list", pSql);
+ tscDebug("0x%"PRIx64" empty vgroup list", pSql->self);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
code = TSDB_CODE_TSC_APP_ERROR;
}
@@ -111,15 +108,14 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformFuncForSTableQuery(pQueryInfo);
-
-
+
tscDebug("0x%"PRIx64" stream:%p, start stream query on:%s QueryInfo->skey=%"PRId64" ekey=%"PRId64" ", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name), pQueryInfo->window.skey, pQueryInfo->window.ekey);
pQueryInfo->command = TSDB_SQL_SELECT;
-
+
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
- tscDoQuery(pSql);
+ executeQuery(pSql, pQueryInfo);
tscIncStreamExecutionCount(pStream);
} else {
setRetryInfo(pStream, code);
@@ -141,11 +137,12 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pStream->numOfRes = 0; // reset the numOfRes.
SSqlObj *pSql = pStream->pSql;
+
  // pSql may be NULL if killStream was already called
if(pSql == NULL) {
return ;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
tscDebug("0x%"PRIx64" add into timer", pSql->self);
if (pStream->isProject) {
@@ -163,11 +160,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pQueryInfo->window.skey = pStream->stime;
int64_t etime = taosGetTimestamp(pStream->precision);
// delay to wait all data in last time window
- if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
- etime -= tsMaxStreamComputDelay * 1000l;
- } else {
- etime -= tsMaxStreamComputDelay;
- }
+ etime -= convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
@@ -184,8 +177,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
- } else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
- timer /= 1000l;
+ } else {
+ timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
}
tscSetRetryTimer(pStream, pSql, timer);
return;
@@ -205,20 +198,20 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
- tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self, pStream, numOfRows,
- retryDelay);
+ tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
+ pStream, numOfRows, retryDelay);
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
tscFreeSqlResult(pStream->pSql);
- tscFreeSubobj(pStream->pSql);
+ tscFreeSubobj(pStream->pSql);
tfree(pStream->pSql->pSubs);
pStream->pSql->subState.numOfSub = 0;
@@ -235,7 +228,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) {
#if 0
SSqlObj * pSql = pStream->pSql;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) {
return;
@@ -278,13 +271,14 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
- tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql->self, pStream, numOfRows, retryDelayTime);
+ tscError("stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
return;
}
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ STableMetaInfo *pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
for(int32_t i = 0; i < numOfRows; ++i) {
@@ -324,6 +318,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
+ if (pQueryInfo->pQInfo != NULL) {
+ qDestroyQueryInfo(pQueryInfo->pQInfo);
+ pQueryInfo->pQInfo = NULL;
+ }
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
@@ -360,7 +358,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
- tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
+ tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 " - %" PRId64 " end, in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
@@ -374,9 +372,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
- int64_t maxDelay =
- (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
-
+ int64_t maxDelay = convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
+
int64_t delayDelta = maxDelay;
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
@@ -421,7 +418,6 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
}
} else {
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
- //int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream,
pStream->stime, pStream->etime);
@@ -444,18 +440,16 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
timer += getLaunchTimeDelay(pStream);
- if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
- timer = timer / 1000L;
- }
+ timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
tscSetRetryTimer(pStream, pSql, timer);
}
static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
- (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
+ convertTimePrecision(tsMinIntervalTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (!pStream->isProject && pQueryInfo->interval.interval == 0) {
sprintf(pSql->cmd.payload, "the interval value is 0");
@@ -477,7 +471,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
}
int64_t minSlidingTime =
- (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
+ convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
@@ -505,7 +499,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
}
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
@@ -525,7 +519,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else {
return stime;
}
-
+
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
} else {
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
@@ -545,13 +539,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
timer = pStream->stime - now;
}
- int64_t startDelay =
- (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
-
+ int64_t startDelay = convertTimePrecision(tsStreamCompStartDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
+
timer += getLaunchTimeDelay(pStream);
timer += startDelay;
- return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
+ return convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
}
static void tscCreateStream(void *param, TAOS_RES *res, int code) {
@@ -567,7 +560,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
return;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@@ -686,6 +679,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pSql->signature = pSql;
pSql->pTscObj = pObj;
+ pSql->rootObj = pSql;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@@ -709,7 +703,10 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pSql->maxRetry = TSDB_MAX_REPLICA;
tscSetStreamDestTable(pStream, dstTable);
- pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
+ pSql->pStream = pStream;
+ pSql->param = pStream;
+ pSql->maxRetry = TSDB_MAX_REPLICA;
+ pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tscFreeSqlObj(pSql);
@@ -718,9 +715,14 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
}
strtolower(pSql->sqlstr, sqlstr);
+ pSql->fp = tscCreateStream;
+ pSql->fetchFp = tscCreateStream;
+ pSql->cmd.resColumnId = TSDB_RES_COL_ID;
- tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tsem_init(&pSql->rspSem, 0, 0);
+ registerSqlObj(pSql);
+
+ tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pSql->fp = cbParseSql;
pSql->fetchFp = cbParseSql;
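A recurring change in tscStream.c above is replacing hand-rolled microsecond arithmetic (`slidingTime / 1000`, `timer /= 1000l`, and the like) with convertTimePrecision, which centralizes the scaling and also covers a third precision level. A rough reimplementation of such a helper, under the assumption that precision codes are ordered milli < micro < nano; the constant values are illustrative, not TDengine's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: TDengine's real convertTimePrecision lives in its
 * common utilities; these constant values are assumptions. */
enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1, PRECISION_NANO = 2 };

static int64_t convert_time_precision(int64_t t, int from, int to) {
  static const int64_t unitsPerMs[] = {1, 1000, 1000000}; /* ms, us, ns */
  assert(from >= PRECISION_MILLI && from <= PRECISION_NANO);
  assert(to   >= PRECISION_MILLI && to   <= PRECISION_NANO);
  if (from == to) return t;
  return (from < to) ? t * (unitsPerMs[to] / unitsPerMs[from])
                     : t / (unitsPerMs[from] / unitsPerMs[to]);
}

int main(void) {
  /* 1500 us -> 1 ms: matches the old `slidingTime / 1000` branch above */
  printf("%lld\n",
         (long long)convert_time_precision(1500, PRECISION_MICRO, PRECISION_MILLI));
  return 0;
}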
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index 421a6ce93a45d10acd8354f9ca41994e7bba465a..0c5eac66e2260f584cafe3855e76418a4462f325 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -127,6 +127,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->pSubscription = pSub;
+ pSql->rootObj = pSql;
pSub->pSql = pSql;
SSqlCmd* pCmd = &pSql->cmd;
@@ -151,6 +152,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
strtolower(pSql->sqlstr, pSql->sqlstr);
pRes->qId = 0;
pRes->numOfRows = 1;
+ pCmd->resColumnId = TSDB_RES_COL_ID;
code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (code != TSDB_CODE_SUCCESS) {
@@ -173,7 +175,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
if (pSql->cmd.command != TSDB_SQL_SELECT && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
line = __LINE__;
- code = TSDB_CODE_TSC_INVALID_SQL;
+ code = TSDB_CODE_TSC_INVALID_OPERATION;
goto fail;
}
@@ -264,9 +266,9 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
SSqlCmd* pCmd = &pSql->cmd;
- TSDB_QUERY_CLEAR_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
+ TSDB_QUERY_CLEAR_TYPE(tscGetQueryInfo(pCmd)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SSubscriptionProgress target = {.uid = pTableMeta->id.uid, .key = 0};
@@ -287,7 +289,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
size_t numOfTables = taosArrayGetSize(tables);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
@@ -308,7 +310,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
taosArrayDestroy(tables);
if (pTableMetaInfo->pVgroupTables && taosArrayGetSize(pTableMetaInfo->pVgroupTables) > 0) {
- TSDB_QUERY_SET_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
+ TSDB_QUERY_SET_TYPE(tscGetQueryInfo(pCmd)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
}
pSub->lastSyncTime = taosGetTimestampMs();
@@ -502,11 +504,13 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
if (pSql == NULL) {
return NULL;
}
+
if (pSub->pSql->self != 0) {
taosReleaseRef(tscObjRef, pSub->pSql->self);
} else {
tscFreeSqlObj(pSub->pSql);
}
+
pSub->pSql = pSql;
pSql->pSubscription = pSub;
pSub->lastSyncTime = 0;
@@ -522,8 +526,8 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSqlObj *pSql = pSub->pSql;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription
size_t size = taosArrayGetSize(pSub->progress);
@@ -568,7 +572,10 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSql->fp = asyncCallback;
pSql->fetchFp = asyncCallback;
pSql->param = pSub;
- tscDoQuery(pSql);
+
+ pSql->cmd.active = pQueryInfo;
+ executeQuery(pSql, pQueryInfo);
+
tsem_wait(&pSub->sem);
if (pRes->code != TSDB_CODE_SUCCESS) {
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 75307087b19a163af4a51a7d58bdfbd80bf7c2ca..b293d92aa4a530905266e8f2a7483f5e4f8fcc80 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -22,9 +22,11 @@
#include "tcompare.h"
#include "tscLog.h"
#include "tscSubquery.h"
-#include "tschemautil.h"
+#include "qTableMeta.h"
#include "tsclient.h"
+#include "qUdf.h"
#include "qUtil.h"
+#include "qPlan.h"
typedef struct SInsertSupporter {
SSqlObj* pSql;
@@ -32,7 +34,7 @@ typedef struct SInsertSupporter {
} SInsertSupporter;
static void freeJoinSubqueryObj(SSqlObj* pSql);
-static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql);
+//static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql);
static int32_t tsCompare(int32_t order, int64_t left, int64_t right) {
if (left == right) {
@@ -55,9 +57,9 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
while (tsBufNextPos(pTSBuf)) {
- STSElem el1 = tsBufGetElem(pTSBuf);
+ el1 = tsBufGetElem(pTSBuf);
- int32_t res = tVariantCompare(el1.tag, tag1);
+ res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
@@ -65,15 +67,11 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, int8_t state) {
- assert(idx < subState->numOfSub);
- assert(subState->states);
+ assert(idx < subState->numOfSub && subState->states != NULL);
+ tscDebug("subquery:0x%"PRIx64",%d state set to %d", pSql->self, idx, state);
pthread_mutex_lock(&subState->mutex);
-
- tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
-
subState->states[idx] = state;
-
pthread_mutex_unlock(&subState->mutex);
}
@@ -84,50 +82,44 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
//lock in caller
tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub);
for (int i = 0; i < subState->numOfSub; i++) {
+ SSqlObj* pSub = pParentSql->pSubs[i];
if (0 == subState->states[i]) {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d NOT finished, abort query completion check", pParentSql->self, pParentSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished yet", pParentSql->self, pSub->self, i);
done = false;
break;
} else {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pParentSql->pSubs[i], i);
+ if (pSub != NULL) {
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pSub->self, i);
+ } else {
+ tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pSub, i);
+ }
}
}
return done;
}
-static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
+bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
SSubqueryState *subState = &pParentSql->subState;
-
assert(idx < subState->numOfSub);
pthread_mutex_lock(&subState->mutex);
- bool done = allSubqueryDone(pParentSql);
-
- if (done) {
- tscDebug("0x%"PRIx64" subquery:%p,%d all subs already done", pParentSql->self, pSql, idx);
-
- pthread_mutex_unlock(&subState->mutex);
-
- return false;
- }
-
- tscDebug("0x%"PRIx64" subquery:%p,%d state set to 1", pParentSql->self, pSql, idx);
-
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index:%d state set to 1", pParentSql->self, pSql->self, idx);
subState->states[idx] = 1;
- done = allSubqueryDone(pParentSql);
-
+ bool done = allSubqueryDone(pParentSql);
+ if (!done) {
+ tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, pSql, idx, pParentSql->subState.numOfSub);
+ }
pthread_mutex_unlock(&subState->mutex);
-
return done;
}
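subAndCheckDone above is reshaped so the state flip and the all-done check happen inside a single critical section, which guarantees exactly one finishing subquery observes done == true and proceeds to the merge stage. A compact sketch of the pattern with stand-in types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the "mark one sub done, detect the last finisher" pattern;
 * sub_state_t stands in for SSubqueryState, not the real struct. */
typedef struct {
  pthread_mutex_t mutex;
  int   numOfSub;
  char *states;              /* 0 = running, 1 = finished */
} sub_state_t;

static bool sub_and_check_done(sub_state_t *s, int idx) {
  pthread_mutex_lock(&s->mutex);
  s->states[idx] = 1;
  bool done = true;
  for (int i = 0; i < s->numOfSub; ++i) {
    if (s->states[i] == 0) { done = false; break; }
  }
  pthread_mutex_unlock(&s->mutex);
  return done;               /* true for exactly one caller: the last one */
}

int main(void) {
  char st[3] = {0};
  sub_state_t s = {PTHREAD_MUTEX_INITIALIZER, 3, st};
  int a = sub_and_check_done(&s, 0);
  int b = sub_and_check_done(&s, 1);
  int c = sub_and_check_done(&s, 2);
  printf("%d %d %d\n", a, b, c);   /* prints: 0 0 1 */
  return 0;
}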
static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
win->skey = INT64_MAX;
win->ekey = INT64_MIN;
@@ -143,7 +135,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
int32_t equalNum = 0;
int32_t stackidx = 0;
SMergeTsCtx* ctx = NULL;
- SMergeTsCtx* pctx = NULL;
+ SMergeTsCtx* pctx = NULL;
SMergeTsCtx* mainCtx = NULL;
STSElem cur;
STSElem prev;
@@ -152,10 +144,10 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
for (int32_t i = 0; i < joinNum; ++i) {
STSBuf* output = tsBufCreate(true, pQueryInfo->order.order);
- SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
+ SQueryInfo* pSubQueryInfo = tscGetQueryInfo(&pSql->pSubs[i]->cmd);
pSubQueryInfo->tsBuf = output;
-
+
SJoinSupporter* pSupporter = pSql->pSubs[i]->param;
if (pSupporter->pTSBuf == NULL) {
@@ -170,7 +162,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
return 0;
}
- tscDebug("0x%"PRIx64" sub:%p table idx:%d, input group number:%d", pSql->self, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" table idx:%d, input group number:%d", pSql->self,
+ pSql->pSubs[i]->self, i, pSupporter->pTSBuf->numOfGroups);
ctxlist[i].p = pSupporter;
ctxlist[i].res = output;
@@ -202,7 +195,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
mainCtx = pctx;
- while (1) {
+ while (1) {
pctx = mainCtx;
prev = tsBufGetElem(pctx->p->pTSBuf);
@@ -218,9 +211,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
int32_t skipped = 0;
- for (int32_t i = 1; i < tableNum; ++i) {
+ for (int32_t i = 1; i < tableNum; ++i) {
SMergeTsCtx* tctx = &ctxlist[i];
-
+
// find the data in supporter2 with the same tag value
STSElem e2 = tsBufFindElemStartPosByTag(tctx->p->pTSBuf, &tag);
@@ -236,7 +229,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
stackidx = 0;
continue;
}
-
+
tableMIdx = taosArrayGet(tsCond, ++slot);
equalNum = 1;
@@ -261,7 +254,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
if (ret == 0) {
if (++equalNum < tableNum) {
pctx = ctx;
-
+
if (++slot >= tableNum) {
slot = 0;
}
@@ -269,14 +262,14 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
tableMIdx = taosArrayGet(tsCond, slot);
continue;
}
-
+
assert(stackidx == tableNum);
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > prev.ts) {
win->skey = prev.ts;
}
-
+
if (win->ekey < prev.ts) {
win->ekey = prev.ts;
}
@@ -284,8 +277,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
prev = tsBufGetElem(tctx->p->pTSBuf);
-
- tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts));
+
+ tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts));
}
} else {
pLimit->offset -= 1;//offset apply to projection?
@@ -293,11 +286,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
-
+
if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
mergeDone = 1;
}
- tctx->numOfInput++;
+ tctx->numOfInput++;
}
if (mergeDone) {
@@ -305,7 +298,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
}
stackidx = 0;
- equalNum = 1;
+ equalNum = 1;
ctxStack[stackidx++] = pctx;
} else if (ret > 0) {
@@ -313,15 +306,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
mergeDone = 1;
break;
}
-
+
ctx->numOfInput++;
stackidx--;
- } else {
+ } else {
stackidx--;
-
+
for (int32_t i = 0; i < stackidx; ++i) {
SMergeTsCtx* tctx = ctxStack[i];
-
+
if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) {
mergeDone = 1;
}
@@ -332,9 +325,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
break;
}
- stackidx = 0;
+ stackidx = 0;
equalNum = 1;
-
+
ctxStack[stackidx++] = pctx;
}
@@ -346,7 +339,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
slot = 0;
stackidx = 0;
-
+
skipRemainValue(mainCtx->p->pTSBuf, &tag);
}
@@ -368,7 +361,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
for (int32_t i = 0; i < joinNum; ++i) {
tsBufFlush(ctxlist[i].res);
-
+
tsBufDestroy(ctxlist[i].p->pTSBuf);
ctxlist[i].p->pTSBuf = NULL;
}
@@ -376,11 +369,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
TSKEY et = taosGetTimestampUs();
for (int32_t i = 0; i < joinNum; ++i) {
- tscDebug("0x%"PRIx64" sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
- pSql->self, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
+ pSql->self, pSql->pSubs[i]->self, i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
tsBufGetNumOfGroup(ctxlist[i].res), et - st);
- }
+ }
return ctxlist[0].res->numOfTotal;
}
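doTSBlockIntersect keeps only the timestamps present in every join arm, advancing the lagging cursor until all streams agree. A simplified single-tag sketch of that k-way intersection over sorted arrays (STSBuf streams are replaced by plain arrays; inputs are assumed non-empty and k <= 8):

#include <stdio.h>

/* Advance the cursor holding the smallest value until all k sorted
 * streams agree on one timestamp, then emit it. Illustrative only. */
static int intersect(const long *ts[], const int len[], int k,
                     long *out, int cap) {
  int idx[8] = {0};     /* one cursor per stream; sketch assumes k <= 8 */
  int n = 0;
  for (;;) {
    long lo = ts[0][idx[0]], hi = lo;
    for (int i = 1; i < k; ++i) {
      long v = ts[i][idx[i]];
      if (v < lo) lo = v;
      if (v > hi) hi = v;
    }
    if (lo == hi) {                 /* all streams agree: emit */
      if (n < cap) out[n++] = lo;
      for (int i = 0; i < k; ++i)
        if (++idx[i] >= len[i]) return n;
    } else {                        /* advance every cursor below hi */
      for (int i = 0; i < k; ++i) {
        while (ts[i][idx[i]] < hi)
          if (++idx[i] >= len[i]) return n;
      }
    }
  }
}

int main(void) {
  long a[] = {1, 3, 5, 7}, b[] = {3, 4, 5, 8}, c[] = {0, 3, 5, 9};
  const long *ts[] = {a, b, c};
  int len[] = {4, 4, 4};
  long out[4];
  int n = intersect(ts, len, 3, out, 4);
  for (int i = 0; i < n; ++i) printf("%ld ", out[i]);   /* 3 5 */
  printf("\n");
  return 0;
}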
@@ -393,15 +386,15 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
return NULL;
}
- pSupporter->pObj = pSql;
+ pSupporter->pObj = pSql->self;
pSupporter->subqueryIndex = index;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, index);
pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid;
assert (pSupporter->uid != 0);
@@ -419,15 +412,18 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
}
if (pSupporter->exprList != NULL) {
- tscSqlExprInfoDestroy(pSupporter->exprList);
+ tscExprDestroy(pSupporter->exprList);
+ pSupporter->exprList = NULL;
}
if (pSupporter->colList != NULL) {
tscColumnListDestroy(pSupporter->colList);
}
- tscFieldInfoClear(&pSupporter->fieldsInfo);
-
+// tscFieldInfoClear(&pSupporter->fieldsInfo);
+ if (pSupporter->fieldsInfo.internalField != NULL) {
+ taosArrayDestroy(pSupporter->fieldsInfo.internalField);
+ }
if (pSupporter->pTSBuf != NULL) {
tsBufDestroy(pSupporter->pTSBuf);
pSupporter->pTSBuf = NULL;
@@ -440,9 +436,9 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
pSupporter->f = NULL;
}
-
if (pSupporter->pVgroupTables != NULL) {
- taosArrayDestroy(pSupporter->pVgroupTables);
+ //taosArrayDestroy(pSupporter->pVgroupTables);
+ tscFreeVgroupTableInfo(pSupporter->pVgroupTables);
pSupporter->pVgroupTables = NULL;
}
@@ -451,25 +447,6 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) {
free(pSupporter);
}
-/*
- * need the secondary query process
- * In case of count(ts)/count(*)/spread(ts) query, that are only applied to
- * primary timestamp column , the secondary query is not necessary
- *
- */
-static UNUSED_FUNC bool needSecondaryQuery(SQueryInfo* pQueryInfo) {
- size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColumn* base = taosArrayGet(pQueryInfo->colList, i);
- if (base->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return true;
- }
- }
-
- return false;
-}
-
static void filterVgroupTables(SQueryInfo* pQueryInfo, SArray* pVgroupTables) {
int32_t num = 0;
int32_t* list = NULL;
@@ -570,12 +547,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
continue;
}
- SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0);
+ SQueryInfo *pSubQueryInfo = tscGetQueryInfo(&pPrevSub->cmd);
STSBuf *pTsBuf = pSubQueryInfo->tsBuf;
pSubQueryInfo->tsBuf = NULL;
// free result for async object will also free sqlObj
- assert(tscSqlExprNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one result columns
+ assert(tscNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one result columns
taos_free_result(pPrevSub);
SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, TSDB_SQL_SELECT, NULL);
@@ -584,12 +561,11 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
-
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pNew->cmd);
pQueryInfo->tsBuf = pTsBuf; // transfer the ownership of timestamp comp-z data to the new created object
// set the second stage sub query for join process
@@ -598,12 +574,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
- pQueryInfo->colList = pSupporter->colList;
- pQueryInfo->exprList = pSupporter->exprList;
- pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
+ pQueryInfo->colList = pSupporter->colList;
+ pQueryInfo->exprList = pSupporter->exprList;
+ pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
pQueryInfo->groupbyExpr = pSupporter->groupInfo;
+ pQueryInfo->pUpstream = taosArrayInit(4, sizeof(POINTER_BYTES));
- assert(pNew->subState.numOfSub == 0 && pNew->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1);
+ assert(pNew->subState.numOfSub == 0 && pQueryInfo->numOfTables == 1);
tscFieldInfoUpdateOffset(pQueryInfo);
@@ -611,35 +588,33 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
pTableMetaInfo->pVgroupTables = pSupporter->pVgroupTables;
pSupporter->exprList = NULL;
- pSupporter->colList = NULL;
+ pSupporter->colList = NULL;
pSupporter->pVgroupTables = NULL;
memset(&pSupporter->fieldsInfo, 0, sizeof(SFieldInfo));
- memset(&pSupporter->groupInfo, 0, sizeof(SSqlGroupbyExpr));
+ memset(&pSupporter->groupInfo, 0, sizeof(SGroupbyExpr));
/*
* When handling the projection query, the offset value will be modified for table-table join, which is changed
* during the timestamp intersection.
*/
pSupporter->limit = pQueryInfo->limit;
- pQueryInfo->limit = pSupporter->limit;
-
SColumnIndex index = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* s = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, 0);
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
- int16_t funcId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ int16_t funcId = pExpr->base.functionId;
// add the invisible timestamp column
- if ((pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) ||
+ if ((pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) ||
(funcId != TSDB_FUNC_TS && funcId != TSDB_FUNC_TS_DUMMY && funcId != TSDB_FUNC_PRJ)) {
int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS;
- tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL);
- tscPrintSelectClause(pNew, 0);
+ tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL, getNewResColId(&pNew->cmd));
+ tscPrintSelNodeList(pNew, 0);
tscFieldInfoUpdateOffset(pQueryInfo);
- pExpr = tscSqlExprGet(pQueryInfo, 0);
+ pExpr = tscExprGet(pQueryInfo, 0);
}
// set the join condition tag column info, todo extract method
@@ -648,8 +623,14 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
- pExpr->param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
- pExpr->numOfParams = 1;
+#ifndef _TD_NINGSI_60
+ pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
+#else
+ pExpr->base.param[0].i64 = colId;
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
+ pExpr->base.param[0].nLen = sizeof(int64_t);
+#endif
+ pExpr->base.numOfParams = 1;
}
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@@ -663,11 +644,11 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
}
}
- subquerySetState(pPrevSub, &pSql->subState, i, 0);
+ subquerySetState(pNew, &pSql->subState, i, 0);
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
- tscDebug("0x%"PRIx64" subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
- pSql->self, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
+ pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
}
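The new _TD_NINGSI_60 guard in this hunk exists because some older toolchains reject assigning a compound literal to a struct, so the patch falls back to per-field assignment. A small illustration of the same trick; variant_t, TYPE_BIGINT, and OLD_TOOLCHAIN are assumed stand-ins:

#include <stdint.h>
#include <stdio.h>

/* variant_t mimics the shape used above, not the real tVariant. */
typedef struct { int64_t i64; int32_t nType; int32_t nLen; } variant_t;

#define TYPE_BIGINT 5   /* assumed value, mirrors TSDB_DATA_TYPE_BIGINT */

static void set_tag_param(variant_t *p, int64_t colId) {
#ifndef OLD_TOOLCHAIN
  /* C99 compound literal assigned in one shot */
  *p = (variant_t){.i64 = colId, .nType = TYPE_BIGINT, .nLen = sizeof(int64_t)};
#else
  /* fallback for compilers that cannot handle the assignment above */
  p->i64   = colId;
  p->nType = TYPE_BIGINT;
  p->nLen  = sizeof(int64_t);
#endif
}

int main(void) {
  variant_t v;
  set_tag_param(&v, 42);
  printf("%lld %d %d\n", (long long)v.i64, v.nType, v.nLen);
  return 0;
}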
@@ -686,7 +667,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
continue;
}
- tscDoQuery(pSql->pSubs[i]);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->pSubs[i]->cmd);
+ executeQuery(pSql->pSubs[i], pQueryInfo);
}
return TSDB_CODE_SUCCESS;
@@ -711,14 +693,12 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
}
tfree(pSql->subState.states);
-
-
pSql->subState.numOfSub = 0;
}
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
- tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code));
+ tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
return 0;
}
@@ -733,7 +713,6 @@ static void updateQueryTimeRange(SQueryInfo* pQueryInfo, STimeWindow* win) {
pQueryInfo->window = *win;
}
-
int32_t tagValCompar(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) varDataVal(p1);
const STidTags* t2 = (const STidTags*) varDataVal(p2);
@@ -811,12 +790,14 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
tscClearSubqueryInfo(pCmd);
tscFreeSqlResult(pSql);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
assert(pQueryInfo->numOfTables == 1);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STimeWindow window = pQueryInfo->window;
tscInitQueryInfo(pQueryInfo);
+ pQueryInfo->window = window;
TSDB_QUERY_CLEAR_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
@@ -826,14 +807,16 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL);
+ tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0);
+ SExprInfo *pExpr = tscExprGet(pQueryInfo, 0);
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
- pExpr->param->i64 = tagColId;
- pExpr->numOfParams = 1;
+ pExpr->base.param[0].i64 = tagColId;
+ pExpr->base.param[0].nLen = sizeof(int64_t);
+ pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
+ pExpr->base.numOfParams = 1;
}
// add the filter tag column
@@ -843,7 +826,7 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
for (int32_t i = 0; i < s; ++i) {
SColumn *pCol = taosArrayGetP(pSupporter->colList, i);
- if (pCol->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
+ if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pQueryInfo->colList, &p);
}
@@ -853,12 +836,12 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug(
- "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
+ "0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
- pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
- tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
+ pParent->self, pSql->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
+ tscNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
}
static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSqlObj* pPSqlObj) {
@@ -877,6 +860,40 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
return true;
}
+
+bool tscReparseSql(SSqlObj *sql, int32_t code){
+ if (!((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && sql->retry < sql->maxRetry)) {
+ return true;
+ }
+
+ tscFreeSubobj(sql);
+ tfree(sql->pSubs);
+
+ sql->res.code = TSDB_CODE_SUCCESS;
+ sql->retry++;
+
+ tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", sql->self,
+ tstrerror(code), sql->retry);
+
+ tscResetSqlCmd(&sql->cmd, true);
+ code = tsParseSql(sql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return false;
+ }
+
+ if (code != TSDB_CODE_SUCCESS) {
+ sql->res.code = code;
+ tscAsyncResultOnError(sql);
+ return false;
+ }
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&sql->cmd);
+ executeQuery(sql, pQueryInfo);
+
+ return false;
+}
+
+
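tscReparseSql defines a retry gate: it returns true when the error is not retryable (the caller keeps reporting it) and false once a re-parse and re-execution are in flight, after which the caller must stop touching the object, as the goto _return sites below do. A minimal analogue of that contract:

#include <stdbool.h>
#include <stdio.h>

/* Minimal analogue of the tscReparseSql contract; query_t is a stand-in. */
typedef struct { int retry, maxRetry, code; } query_t;

static bool reparse_or_keep_error(query_t *q, int code, int retryableCode) {
  if (code != retryableCode || q->retry >= q->maxRetry) {
    return true;                 /* caller proceeds to report the error */
  }
  q->retry++;
  q->code = 0;                   /* reset, then re-parse and re-issue here */
  printf("retry #%d after code %d\n", q->retry, code);
  return false;                  /* retry in flight: caller must bail out */
}

int main(void) {
  query_t q = {0, 3, 0};
  if (!reparse_or_keep_error(&q, 42, 42)) {
    return 0;                    /* mirrors the `goto _return` in callbacks */
  }
  return 1;
}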
static void setTidTagType(SJoinSupporter* p, uint8_t type) {
for (int32_t i = 0; i < p->num; ++i) {
STidTags * tag = (STidTags*) varDataVal(p->pIdTagList + i * p->tagSize);
@@ -906,12 +923,14 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
ctxlist[i].p = p;
ctxlist[i].res = taosArrayInit(p->num, size);
-
+
tscDebug("Join %d - num:%d", i, p->num);
-
+
  // sort according to the tag value
- qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
-
+ if (p->pIdTagList != NULL) {
+ qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar);
+ }
+
if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) {
for (int32_t j = 0; j <= i; j++) {
taosArrayDestroy(ctxlist[j].res);
@@ -964,9 +983,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
mergeDone = 0;
continue;
}
-
+
tableMIdx = taosArrayGet(tagCond, slot);
-
+
pctx = &ctxlist[*tableMIdx];
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
@@ -976,10 +995,10 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
tableMIdx = taosArrayGet(tagCond, ++slot);
equalNum = 1;
-
+
while (1) {
ctx = &ctxlist[*tableMIdx];
-
+
cur = (STidTags*) varDataVal(ctx->p->pIdTagList + ctx->idx * ctx->p->tagSize);
assert(cur->tid != 0 && prev->tid != 0);
@@ -991,7 +1010,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
if (++equalNum < tableNum) {
prev = cur;
pctx = ctx;
-
+
if (++slot >= tableNum) {
slot = 0;
}
@@ -1004,7 +1023,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
*(int*) prev->tag, prev->tid, prev->uid, cur->tid, cur->uid);
assert(stackidx == tableNum);
-
+
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
prev = (STidTags*) varDataVal(tctx->p->pIdTagList + tctx->idx * tctx->p->tagSize);
@@ -1014,7 +1033,7 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
-
+
if (++tctx->idx >= tctx->p->num) {
mergeDone = 1;
break;
@@ -1027,19 +1046,19 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
stackidx = 0;
equalNum = 1;
-
+
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
} else if (ret > 0) {
stackidx--;
-
+
if (++ctx->idx >= ctx->p->num) {
break;
}
} else {
stackidx--;
-
+
for (int32_t i = 0; i < stackidx; ++i) {
SMergeCtx* tctx = ctxStack[i];
if (++tctx->idx >= tctx->p->num) {
@@ -1052,9 +1071,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
break;
}
- stackidx = 0;
+ stackidx = 0;
equalNum = 1;
-
+
prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize);
ctxStack[stackidx++] = pctx;
}
@@ -1070,14 +1089,14 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
// reorganize the tid-tag value according to both the vgroup id and tag values
// sort according to the tag value
size_t num = taosArrayGetSize(ctxlist[i].res);
-
+
qsort((ctxlist[i].res)->pData, num, size, tidTagsCompar);
taosArrayPush(resList, &ctxlist[i].res);
tscDebug("0x%"PRIx64" tags match complete, result num: %"PRIzu, pParentSql->self, num);
}
-
+
return TSDB_CODE_SUCCESS;
}
@@ -1100,13 +1119,16 @@ bool emptyTagList(SArray* resList, int32_t size) {
static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
- SSqlObj* pParentSql = pSupporter->pObj;
+ int64_t handle = pSupporter->pObj;
+
+ SSqlObj* pParentSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
+ if (pParentSql == NULL) return;
SSqlObj* pSql = (SSqlObj*)tres;
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
// todo, the type may not include TSDB_QUERY_TYPE_TAG_FILTER_QUERY
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
@@ -1114,12 +1136,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
}
- tscAsyncResultOnError(pParentSql);
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
+ }
- return;
+ tscAsyncResultOnError(pParentSql);
+ goto _return;
}
// check for the error code firstly
@@ -1131,11 +1156,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
+ }
+
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
// keep the results in memory
@@ -1150,11 +1179,11 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
pSupporter->pIdTagList = tmp;
@@ -1166,7 +1195,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// query not completed, continue to retrieve tid + tag tuples
if (!pRes->completed) {
taos_fetch_rows_a(tres, tidTagRetrieveCallback, param);
- return;
+ goto _return;
}
}
@@ -1187,15 +1216,15 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// set the callback function
pSql->fp = tscJoinQueryCallback;
- tscProcessSql(pSql);
- return;
+ tscBuildAndSendRequest(pSql, NULL);
+ goto _return;
}
// no data exists in next vnode, mark the query completed
// only when there is no subquery exits any more, proceeds to get the intersect of the tuple sets.
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
- return;
+ //tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
+ goto _return;
}
SArray* resList = taosArrayInit(pParentSql->subState.numOfSub, sizeof(SArray *));
@@ -1207,7 +1236,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscAsyncResultOnError(pParentSql);
taosArrayDestroy(resList);
- return;
+ goto _return;
}
if (emptyTagList(resList, pParentSql->subState.numOfSub)) { // no results,return.
@@ -1227,14 +1256,14 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// proceed to for ts_comp query
SSqlCmd* pSubCmd = &pParentSql->pSubs[m]->cmd;
SArray** s = taosArrayGet(resList, m);
-
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
+ SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pSubCmd);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0);
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo, *s);
-
+
SSqlObj* psub = pParentSql->pSubs[m];
((SJoinSupporter*)psub->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo->pVgroupTables);
-
+
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("0x%"PRIx64" reset all sub states to 0", pParentSql->self);
@@ -1251,29 +1280,39 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
}
taosArrayDestroy(resList);
+
+_return:
+ taosReleaseRef(tscObjRef, handle);
}
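From here on, every retrieve callback resolves the parent SSqlObj from an int64_t handle via taosAcquireRef and funnels all exits through a _return label that calls taosReleaseRef, so the parent cannot be freed mid-callback. A toy, single-threaded version of that discipline (the real tscObjRef is a thread-safe ref set keyed by handles):

#include <stdio.h>

/* Toy refcounted object; not the real tscObjRef machinery. */
typedef struct { int refs; } obj_t;

static obj_t *acquire(obj_t *o) {
  if (o->refs <= 0) return NULL;   /* parent already retired */
  o->refs++;
  return o;
}

static void release(obj_t *o) {
  if (--o->refs == 0) printf("object reclaimed\n");
}

static void callback(obj_t *parent) {
  obj_t *p = acquire(parent);
  if (p == NULL) return;           /* mirrors: if (pParentSql == NULL) return; */
  /* ... body may bail out on any error path ... */
  goto _return;                    /* every exit funnels through the release */
_return:
  release(p);
}

int main(void) {
  obj_t parent = { .refs = 1 };
  callback(&parent);
  release(&parent);                /* drop the owner's reference */
  return 0;
}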
static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
- SSqlObj* pParentSql = pSupporter->pObj;
+ int64_t handle = pSupporter->pObj;
+
+ SSqlObj* pParentSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
+ if (pParentSql == NULL) return;
SSqlObj* pSql = (SSqlObj*)tres;
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
- return;
+ goto _return;
+ }
+
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
// check for the error code firstly
@@ -1284,11 +1323,15 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = numOfRows;
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
- return;
+ goto _return;
+ }
+
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
if (numOfRows > 0) { // write the compressed timestamp to disk file
@@ -1297,16 +1340,15 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (pSupporter->f == NULL) {
tscError("0x%"PRIx64" failed to create tmp file:%s, reason:%s", pSql->self, pSupporter->path, strerror(errno));
-
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
}
@@ -1320,12 +1362,12 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
- return;
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
if (pSupporter->pTSBuf == NULL) {
@@ -1344,7 +1386,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pRes->row = pRes->numOfRows;
taos_fetch_rows_a(tres, tsCompRetrieveCallback, param);
- return;
+ goto _return;
}
}
@@ -1371,12 +1413,12 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// set the callback function
pSql->fp = tscJoinQueryCallback;
- tscProcessSql(pSql);
- return;
+ tscBuildAndSendRequest(pSql, NULL);
+ goto _return;
}
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- return;
+ goto _return;
}
tscDebug("0x%"PRIx64" all subquery retrieve ts complete, do ts block intersect", pParentSql->self);
@@ -1390,37 +1432,45 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// set no result command
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
- return;
+ goto _return;
}
// launch the query the retrieve actual results from vnode along with the filtered timestamp
- SQueryInfo* pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex);
+ SQueryInfo* pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
updateQueryTimeRange(pPQueryInfo, &win);
//update the vgroup that involved in real data query
tscLaunchRealSubqueries(pParentSql);
+
+_return:
+ taosReleaseRef(tscObjRef, handle);
}
static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfRows) {
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
+ int64_t handle = pSupporter->pObj;
- SSqlObj* pParentSql = pSupporter->pObj;
+ SSqlObj* pParentSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
+ if (pParentSql == NULL) return;
SSqlObj* pSql = (SSqlObj*)tres;
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
+ }
+
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
-
- return;
+ goto _return;
}
@@ -1431,7 +1481,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
if (numOfRows >= 0) {
@@ -1456,16 +1506,16 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
- tscProcessSql(pSql);
- return;
+ tscBuildAndSendRequest(pSql, NULL);
+ goto _return;
} else {
tscDebug("0x%"PRIx64" no result in current subquery anymore", pSql->self);
}
}
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pState->numOfSub);
- return;
+ //tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
+ goto _return;
}
tscDebug("0x%"PRIx64" all %d secondary subqueries retrieval completed, code:%d", pSql->self, pState->numOfSub, pParentSql->res.code);
@@ -1488,21 +1538,24 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pParentSql->res.precision = pRes1->precision;
if (pRes1->row > 0 && pRes1->numOfRows > 0) {
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
assert(pRes1->row < pRes1->numOfRows);
} else {
if (!stableQuery) {
pRes1->numOfClauseTotal += pRes1->numOfRows;
}
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
}
}
// data has retrieved to client, build the join results
tscBuildResFromSubqueries(pParentSql);
+
+_return:
+ taosReleaseRef(tscObjRef, handle);
}
void tscFetchDatablockForSubquery(SSqlObj* pSql) {
@@ -1521,8 +1574,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
SSqlRes *pRes = &pSub->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0);
-
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSub->cmd);
if (!tscHasReachLimitation(pQueryInfo, pRes)) {
if (pRes->row >= pRes->numOfRows) {
// no data left in current result buffer
@@ -1574,7 +1626,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
continue;
}
- SQueryInfo* p = tscGetQueryInfoDetail(&pSub->cmd, 0);
+ SQueryInfo* p = tscGetQueryInfo(&pSub->cmd);
orderedPrjQuery = tscNonOrderedProjectionQueryOnSTable(p, 0);
if (orderedPrjQuery) {
break;
@@ -1598,7 +1650,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
continue;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSub->cmd);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && pSub->res.row >= pSub->res.numOfRows &&
pSub->res.completed) {
@@ -1619,7 +1671,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
pSub->cmd.command = TSDB_SQL_SELECT;
pSub->fp = tscJoinQueryCallback;
- tscProcessSql(pSub);
+ tscBuildAndSendRequest(pSub, NULL);
tryNextVnode = true;
} else {
tscDebug("0x%"PRIx64" no result in current subquery anymore", pSub->self);
@@ -1654,8 +1706,9 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
continue;
}
- SSqlRes* pRes1 = &pSql1->res;
+
+ SSqlRes* pRes1 = &pSql1->res;
if (pRes1->row >= pRes1->numOfRows) {
subquerySetState(pSql1, &pSql->subState, i, 0);
}
@@ -1673,13 +1726,13 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
pSupporter = (SJoinSupporter*)pSql1->param;
// wait for all subqueries completed
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd1);
assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pRes1->row >= pRes1->numOfRows) {
- tscDebug("0x%"PRIx64" subquery:%p retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1,
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1->self,
pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
tscResetForNextRetrieve(pRes1);
@@ -1689,7 +1742,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
- tscProcessSql(pSql1);
+ tscBuildAndSendRequest(pSql1, NULL);
}
}
}
@@ -1699,15 +1752,14 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
-
// the column transfer support struct has been built
if (pRes->pColumnIndex != NULL) {
return;
}
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
- int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfExprs = (int32_t)tscNumOfExprs(pQueryInfo);
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs);
if (pRes->pColumnIndex == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -1715,12 +1767,12 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
}
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t tableIndexOfSub = -1;
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, j);
- if (pTableMetaInfo->pTableMeta->id.uid == pExpr->uid) {
+ if (pTableMetaInfo->pTableMeta->id.uid == pExpr->base.uid) {
tableIndexOfSub = j;
break;
}
@@ -1729,12 +1781,12 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables);
SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd;
- SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
+ SQueryInfo* pSubQueryInfo = tscGetQueryInfo(pSubCmd);
size_t numOfSubExpr = taosArrayGetSize(pSubQueryInfo->exprList);
for (int32_t k = 0; k < numOfSubExpr; ++k) {
- SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k);
- if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) {
+ SExprInfo* pSubExpr = tscExprGet(pSubQueryInfo, k);
+ if (pExpr->base.functionId == pSubExpr->base.functionId && pExpr->base.colInfo.colId == pSubExpr->base.colInfo.colId) {
pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k};
break;
}
@@ -1742,32 +1794,40 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
}
// restore the offset value for super table query in case of final result.
- tscRestoreFuncForSTableQuery(pQueryInfo);
- tscFieldInfoUpdateOffset(pQueryInfo);
+// tscRestoreFuncForSTableQuery(pQueryInfo);
+// tscFieldInfoUpdateOffset(pQueryInfo);
}
+
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
SSqlObj* pSql = (SSqlObj*)tres;
SJoinSupporter* pSupporter = (SJoinSupporter*)param;
- SSqlObj* pParentSql = pSupporter->pObj;
+ int64_t handle = pSupporter->pObj;
+
+ SSqlObj* pParentSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
+ if (pParentSql == NULL) return;
// There is only one subquery and table for each subquery.
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- assert(pQueryInfo->numOfTables == 1 && pSql->cmd.numOfClause == 1);
+ assert(pQueryInfo->numOfTables == 1);
// retrieve actual query results from vnode during the second stage join subquery
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, code, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
}
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
+ }
+
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
// TODO: a retry is required here instead of returning the error to the client directly
@@ -1778,34 +1838,38 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
pParentSql->res.code = code;
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
- return;
+ goto _return;
+ }
+
+ if (!tscReparseSql(pParentSql->rootObj, pParentSql->res.code)) {
+ goto _return;
}
tscAsyncResultOnError(pParentSql);
- return;
+ goto _return;
}
// retrieve tuples from vnode
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
pSql->fp = tidTagRetrieveCallback;
pSql->cmd.command = TSDB_SQL_FETCH;
- tscProcessSql(pSql);
- return;
+ tscBuildAndSendRequest(pSql, NULL);
+ goto _return;
}
// retrieve ts_comp info from vnode
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
pSql->fp = tsCompRetrieveCallback;
pSql->cmd.command = TSDB_SQL_FETCH;
- tscProcessSql(pSql);
- return;
+ tscBuildAndSendRequest(pSql, NULL);
+ goto _return;
}
// In case of a subsequent query from another vnode, do not wait for the other query responses here.
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- return;
+ goto _return;
}
}
@@ -1819,7 +1883,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
} else { // first retrieve from vnode during the secondary stage sub-query
// the command flag must be set only after the semaphore has been correctly set.
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
@@ -1828,6 +1892,11 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
tscAsyncResultOnError(pParentSql);
}
}
+
+
+_return:
+ taosReleaseRef(tscObjRef, handle);
+
}
/////////////////////////////////////////////////////////////////////////////////////////
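/*
 * The reworked callback above holds the parent SSqlObj through a reference
 * count rather than a raw pointer, and funnels every early exit through the
 * _return label so the reference is released exactly once. A minimal sketch
 * of the guard, assuming taosAcquireRef() returns NULL once the object has
 * been removed from tscObjRef:
 */
static void exampleJoinCallback(void *param) {
  SJoinSupporter *pSupporter = (SJoinSupporter *)param;

  SSqlObj *pParentSql = (SSqlObj *)taosAcquireRef(tscObjRef, pSupporter->pObj);
  if (pParentSql == NULL) {
    return;  // parent already freed; the callback has nothing left to do
  }

  if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
    goto _return;  // early exits jump to the release instead of returning
  }

  // ... normal subquery processing ...

_return:
  taosReleaseRef(tscObjRef, pSupporter->pObj);
}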
@@ -1837,7 +1906,7 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
SSqlCmd * pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
pSql->res.qId = 0x1;
assert(pSql->res.numOfRows == 0);
@@ -1860,16 +1929,9 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
addGroupInfoForSubquery(pSql, pNew, 0, tableIndex);
// refactor as one method
- SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd);
assert(pNewQueryInfo != NULL);
-
- // update the table index
- size_t num = taosArrayGetSize(pNewQueryInfo->colList);
- for (int32_t i = 0; i < num; ++i) {
- SColumn* pCol = taosArrayGetP(pNewQueryInfo->colList, i);
- pCol->colIndex.tableIndex = 0;
- }
-
+
pSupporter->colList = pNewQueryInfo->colList;
pNewQueryInfo->colList = NULL;
@@ -1885,7 +1947,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}
pSupporter->groupInfo = pNewQueryInfo->groupbyExpr;
- memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));
+ memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SGroupbyExpr));
pNew->cmd.numOfCols = 0;
pNewQueryInfo->interval.interval = 0;
@@ -1893,13 +1955,17 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;
+ taosArrayDestroy(pNewQueryInfo->pUpstream);
pNewQueryInfo->order.orderColId = INT32_MIN;
// backup the data and clear it in the sqlcmd object
- memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));
-
+ memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SGroupbyExpr));
+
+ STimeWindow range = pNewQueryInfo->window;
tscInitQueryInfo(pNewQueryInfo);
+
+ pNewQueryInfo->window = range;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // return the tableId & tag
@@ -1917,7 +1983,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
int16_t type = 0;
int32_t inter = 0;
- getResultDataInfo(s->type, s->bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0);
+ getResultDataInfo(s->type, s->bytes, TSDB_FUNC_TID_TAG, 0, &type, &bytes, &inter, 0, 0, NULL);
SSchema s1 = {.colId = s->colId, .type = (uint8_t)type, .bytes = bytes};
pSupporter->tagSize = s1.bytes;
@@ -1925,26 +1991,26 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
// set get tags query type
TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
- tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG);
+ tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG, getNewResColId(pCmd));
size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList);
tscDebug(
- "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
+ "0x%"PRIX64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s",
- pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
+ pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL);
+ tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function
- SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0);
+ SExprInfo *pExpr = tscExprGet(pNewQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
- pExpr->param->i64 = tagColId;
- pExpr->numOfParams = 1;
+ pExpr->base.param->i64 = tagColId;
+ pExpr->base.numOfParams = 1;
}
// add the filter tag column
@@ -1954,7 +2020,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
for (int32_t i = 0; i < s; ++i) {
SColumn *pCol = taosArrayGetP(pSupporter->colList, i);
- if (pCol->numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
+ if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
SColumn *p = tscColumnClone(pCol);
taosArrayPush(pNewQueryInfo->colList, &p);
}
@@ -1964,14 +2030,14 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList);
tscDebug(
- "%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
+ "0x%"PRIX64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
- pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
+ pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
}
} else {
assert(0);
- SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd);
pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY;
}
@@ -1982,7 +2048,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0);
int32_t code = TSDB_CODE_SUCCESS;
@@ -1999,15 +2065,10 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
}
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
- tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self);
-
- bool hasEmptySub = false;
+ tscDebug("0x%"PRIx64" reset all sub states to 0, start subquery, total:%d", pSql->self, pQueryInfo->numOfTables);
- tscDebug("0x%"PRIx64" start subquery, total:%d", pSql->self, pQueryInfo->numOfTables);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
-
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
-
if (pSupporter == NULL) { // failed to create support struct, abort current query
tscError("0x%"PRIx64" tableIndex:%d, failed to allocate join support object, abort further query", pSql->self, i);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -2021,16 +2082,15 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
}
SSqlObj* pSub = pSql->pSubs[i];
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSub->cmd, 0, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSub->cmd, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->vgroupList->numOfVgroups == 0)) {
- hasEmptySub = true;
+ pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
break;
}
}
- if (hasEmptySub) { // at least one subquery is empty, do nothing and return
+ if (pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { // at least one subquery is empty, do nothing and return
freeJoinSubqueryObj(pSql);
- pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pSql->fp)(pSql->param, pSql, 0);
} else {
int fail = 0;
@@ -2041,7 +2101,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
continue;
}
- if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
+ if ((code = tscBuildAndSendRequest(pSub, NULL)) != TSDB_CODE_SUCCESS) {
pRes->code = code;
(*pSub->fp)(pSub->param, pSub, 0);
fail = 1;
@@ -2062,17 +2122,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
tscAsyncResultOnError(pSql);
}
-static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL);
-
- SRetrieveSupport* pSupport = pSub->param;
-
- tfree(pSupport->localBuffer);
- tfree(pSupport);
+
+ tscFreeRetrieveSup(pSub);
taos_free_result(pSub);
}
@@ -2110,12 +2167,12 @@ typedef struct SFirstRoundQuerySup {
void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, SQueryInfo* pQueryInfo) {
TSKEY key = INT64_MIN;
for(int32_t i = 0; i < numOfCols; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag) || pExpr->base.functionId == TSDB_FUNC_PRJ) {
continue;
}
- if (pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if (pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
key = *(TSKEY*) row[i];
continue;
}
@@ -2127,7 +2184,7 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S
SET_DOUBLE_NULL(&v);
}
- int32_t id = pExpr->colInfo.colId;
+ int32_t id = pExpr->base.colInfo.colId;
int32_t numOfQueriedCols = (int32_t) taosArrayGetSize(pInterResult->pResult);
SArray* p = NULL;
@@ -2171,7 +2228,7 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SFirstRoundQuerySup* pSup = param;
SSqlObj* pParent = pSup->pParent;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
int32_t code = taos_errno(pSql);
if (code != TSDB_CODE_SUCCESS) {
@@ -2203,16 +2260,16 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
int32_t offset = 0;
for (int32_t i = 0; i < numOfCols && offset < pSup->tagLen; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
// tag or group by column
- if (TSDB_COL_IS_TAG(pExpr->colInfo.flag) || pExpr->functionId == TSDB_FUNC_PRJ) {
+ if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag) || pExpr->base.functionId == TSDB_FUNC_PRJ) {
if (row[i] == NULL) {
- setNull(p + offset, pExpr->resType, pExpr->resBytes);
+ setNull(p + offset, pExpr->base.resType, pExpr->base.resBytes);
} else {
memcpy(p + offset, row[i], length[i]);
}
- offset += pExpr->resBytes;
+ offset += pExpr->base.resBytes;
}
}
@@ -2247,14 +2304,14 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
}
}
- if (!pRes->completed) {
+ if (!pRes->completed && numOfRows > 0) {
taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param);
return;
}
// set the parameters for the second round query process
SSqlCmd *pPCmd = &pParent->cmd;
- SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0);
+ SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pPCmd);
int32_t resRows = pSup->numOfRows;
if (pSup->numOfRows > 0) {
@@ -2281,7 +2338,7 @@ void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
}
pQueryInfo1->round = 1;
- tscDoQuery(pParent);
+ executeQuery(pParent, pQueryInfo1);
}
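/*
 * Round one above rewrites TSDB_FUNC_STDDEV_DST into TSDB_FUNC_AVG per group;
 * round two re-scans the data and accumulates squared deviations against the
 * cached means. A hedged sketch of the round-two arithmetic, assuming the
 * per-group mean produced by round one (not the engine's exact kernel):
 */
#include <math.h>

static double stddevSecondRound(const double *val, int32_t num, double mean) {
  double sum = 0;
  for (int32_t i = 0; i < num; ++i) {
    double d = val[i] - mean;
    sum += d * d;  // accumulate squared deviation from the round-one mean
  }
  return sqrt(sum / num);  // population stddev over the group
}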
void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) {
@@ -2295,7 +2352,7 @@ void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) {
destroySup(pSup);
taos_free_result(pSql);
- parent->res.code = code;
+ parent->res.code = c;
tscAsyncResultOnError(parent);
return;
}
@@ -2304,8 +2361,8 @@ void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) {
}
int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
- STableMetaInfo* pTableMetaInfo1 = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ STableMetaInfo* pTableMetaInfo1 = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
SFirstRoundQuerySup *pSup = calloc(1, sizeof(SFirstRoundQuerySup));
@@ -2317,15 +2374,21 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SSqlObj *pNew = createSubqueryObj(pSql, 0, tscFirstRoundCallback, pSup, TSDB_SQL_SELECT, NULL);
SSqlCmd *pCmd = &pNew->cmd;
+ SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pCmd);
+ assert(pQueryInfo->numOfTables == 1);
+
+ SArray* pColList = pNewQueryInfo->colList;
+ pNewQueryInfo->colList = NULL;
+ pNewQueryInfo->fillType = TSDB_FILL_NONE;
+
tscClearSubqueryInfo(pCmd);
tscFreeSqlResult(pSql);
- SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
- assert(pQueryInfo->numOfTables == 1);
-
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0);
tscInitQueryInfo(pNewQueryInfo);
+
+ // add the group cond
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
@@ -2335,83 +2398,109 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
}
}
+ // add the tag filter cond
if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
+ pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->interval = pQueryInfo->interval;
+ pNewQueryInfo->sessionWindow = pQueryInfo->sessionWindow;
pCmd->command = TSDB_SQL_SELECT;
pNew->fp = tscFirstRoundCallback;
- int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
int32_t index = 0;
for(int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) {
- taosArrayPush(pSup->pColsInfo, &pExpr->resColId);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) {
+ taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
- SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId);
+ SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
- SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL);
- p->resColId = pExpr->resColId; // update the result column id
- } else if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) {
- taosArrayPush(pSup->pColsInfo, &pExpr->resColId);
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ p->base.resColId = pExpr->base.resColId; // update the result column id
+ } else if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) {
+ taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId);
- SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex};
+ SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->base.colInfo.colIndex};
SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)};
- tstrncpy(schema.name, pExpr->aliasName, tListLen(schema.name));
+ tstrncpy(schema.name, pExpr->base.aliasName, tListLen(schema.name));
- SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL);
- p->resColId = pExpr->resColId; // update the result column id
- } else if (pExpr->functionId == TSDB_FUNC_TAG) {
- pSup->tagLen += pExpr->resBytes;
- SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex};
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ p->base.resColId = pExpr->base.resColId; // update the result column id
+ } else if (pExpr->base.functionId == TSDB_FUNC_TAG) {
+ pSup->tagLen += pExpr->base.resBytes;
+ SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->base.colInfo.colIndex};
SSchema* schema = NULL;
- if (pExpr->colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) {
- schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId);
+ if (pExpr->base.colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) {
+ schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
} else {
schema = tGetTbnameColumnSchema();
}
- SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG);
- p->resColId = pExpr->resColId;
- } else if (pExpr->functionId == TSDB_FUNC_PRJ) {
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd));
+ p->base.resColId = pExpr->base.resColId;
+ } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) {
int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo);
for(int32_t k = 0; k < num; ++k) {
SColIndex* pIndex = taosArrayGet(pNewQueryInfo->groupbyExpr.columnInfo, k);
- if (pExpr->colInfo.colId == pIndex->colId) {
- pSup->tagLen += pExpr->resBytes;
- taosArrayPush(pSup->pColsInfo, &pExpr->resColId);
+ if (pExpr->base.colInfo.colId == pIndex->colId) {
+ pSup->tagLen += pExpr->base.resBytes;
+ taosArrayPush(pSup->pColsInfo, &pExpr->base.resColId);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pIndex->colIndex};
- SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId);
+ SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->base.colInfo.colId);
//doLimitOutputNormalColOfGroupby
- SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL);
- p->numOfParams = 1;
- p->param[0].i64 = 1;
- p->param[0].nType = TSDB_DATA_TYPE_INT;
- p->resColId = pExpr->resColId; // update the result column id
+ SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_PRJ, &colIndex, schema, TSDB_COL_NORMAL, getNewResColId(pCmd));
+ p->base.numOfParams = 1;
+ p->base.param[0].i64 = 1;
+ p->base.param[0].nType = TSDB_DATA_TYPE_INT;
+ p->base.resColId = pExpr->base.resColId; // update the result column id
}
}
}
}
- SColumnIndex columnIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
- tscInsertPrimaryTsSourceColumn(pNewQueryInfo, &columnIndex);
+ // add the normal column filter cond
+ if (pColList != NULL) {
+ size_t s = taosArrayGetSize(pColList);
+ for (int32_t i = 0; i < s; ++i) {
+ SColumn *pCol = taosArrayGetP(pColList, i);
+ if (pCol->info.flist.numOfFilters > 0) { // copy to the pNew->cmd.colList if it is filtered.
+ int32_t index1 = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ if (index1 >= 0) {
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1);
+ tscColumnCopy(x, pCol);
+ } else {
+ SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
+ tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
+ int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ assert(ti >= 0);
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
+ tscColumnCopy(x, pCol);
+ }
+ }
+ }
+
+ tscColumnListDestroy(pColList);
+ }
+
+ tscInsertPrimaryTsSourceColumn(pNewQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
tscTansformFuncForSTableQuery(pNewQueryInfo);
tscDebug(
- "%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
+ "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
- pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
- tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
+ pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
+ tscNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscHandleMasterSTableQuery(pNew);
return TSDB_CODE_SUCCESS;
@@ -2433,12 +2522,16 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) {
SSqlObj* pSql = pSchedMsg->ahandle;
SPair* p = pSchedMsg->msg;
- for(int32_t i = p->first; i < p->second; ++i) {
+ for (int32_t i = p->first; i < p->second; ++i) {
+ if (i >= pSql->subState.numOfSub) {
+ tfree(p);
+ return;
+ }
SSqlObj* pSub = pSql->pSubs[i];
SRetrieveSupport* pSupport = pSub->param;
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch subquery, orderOfSub:%d.", pSql->self, pSub->self, pSupport->subqueryIndex);
- tscProcessSql(pSub);
+ tscBuildAndSendRequest(pSub, NULL);
}
tfree(p);
@@ -2450,20 +2543,18 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
// the pRes->code check only matters when launching the metric sub-queries
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
- pCmd->command = TSDB_SQL_RETRIEVE_LOCALMERGE; // enable the abort of kill super table function.
+ pCmd->command = TSDB_SQL_RETRIEVE_GLOBALMERGE; // enable the abort of kill super table function.
return pRes->code;
}
tExtMemBuffer **pMemoryBuf = NULL;
tOrderDescriptor *pDesc = NULL;
- SColumnModel *pModel = NULL;
- SColumnModel *pFinalModel = NULL;
pRes->qId = 0x1; // hack the qhandle check
-
- const uint32_t nBufferSize = (1u << 16u); // 64KB
-
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+
+ const uint32_t nBufferSize = (1u << 18u); // 256KB
+
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSubqueryState *pState = &pSql->subState;
@@ -2476,10 +2567,11 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
assert(pState->numOfSub > 0);
- int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, &pFinalModel, nBufferSize);
+ int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
if (ret != 0) {
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ pRes->code = ret;
tscAsyncResultOnError(pSql);
+ tfree(pDesc);
tfree(pMemoryBuf);
return ret;
}
@@ -2489,7 +2581,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
if (pSql->pSubs == NULL) {
tfree(pSql->pSubs);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel,pState->numOfSub);
+ tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
tscAsyncResultOnError(pSql);
return ret;
@@ -2499,8 +2591,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
if (pState->states == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
+
tscAsyncResultOnError(pSql);
- tfree(pMemoryBuf);
return ret;
}
@@ -2524,6 +2617,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->pOrderDescriptor = pDesc;
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
+ trs->localBufferSize = nBufferSize + sizeof(tFilePage);
if (trs->localBuffer == NULL) {
tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
tfree(trs);
@@ -2532,8 +2626,6 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->subqueryIndex = i;
trs->pParentSql = pSql;
- trs->pFinalColModel = pModel;
- trs->pFFColModel = pFinalModel;
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
if (pNew == NULL) {
@@ -2545,25 +2637,26 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
// todo handle multi-vnode situation
if (pQueryInfo->tsBuf) {
- SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+ SQueryInfo *pNewQueryInfo = tscGetQueryInfo(&pNew->cmd);
pNewQueryInfo->tsBuf = tsBufClone(pQueryInfo->tsBuf);
assert(pNewQueryInfo->tsBuf != NULL);
}
- tscDebug("0x%"PRIx64" sub:%p create subquery success. orderOfSub:%d", pSql->self, pNew, trs->subqueryIndex);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self,
+ trs->subqueryIndex);
}
if (i < pState->numOfSub) {
tscError("0x%"PRIx64" failed to prepare subquery structure and launch subqueries", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
+ tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code; // free all allocated resource
}
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
- tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
+ tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc, pState->numOfSub);
doCleanupSubqueries(pSql, i);
return pRes->code;
}
@@ -2574,7 +2667,12 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK;
assert(numOfTasks >= 1);
- int32_t num = (pState->numOfSub/numOfTasks) + 1;
+ int32_t num;
+ if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) {
+ num = MAX_REQUEST_PER_TASK;
+ } else {
+ num = pState->numOfSub / numOfTasks + 1;
+ }
tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks);
for(int32_t j = 0; j < numOfTasks; ++j) {
@@ -2599,7 +2697,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
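/*
 * The adjusted batching above guarantees that num * numOfTasks covers every
 * subquery without exceeding MAX_REQUEST_PER_TASK per scheduler task (the
 * old unconditional "+ 1" overshot the per-task cap whenever numOfSub was an
 * exact multiple of it). A sketch of the partitioning, with the same clamp
 * that doSendQueryReqs applies via its i >= numOfSub check:
 */
static void partitionSubqueries(int32_t numOfSub, int32_t maxPerTask) {
  int32_t numOfTasks = (numOfSub + maxPerTask - 1) / maxPerTask;
  int32_t num = (numOfSub / numOfTasks == maxPerTask) ? maxPerTask
                                                      : numOfSub / numOfTasks + 1;
  for (int32_t j = 0; j < numOfTasks; ++j) {
    int32_t first  = j * num;
    int32_t second = (j + 1) * num;
    if (second > numOfSub) {
      second = numOfSub;  // the last task may carry fewer subqueries
    }
    // schedule the [first, second) range as one SPair message here
  }
}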
-static void tscFreeRetrieveSup(SSqlObj *pSql) {
+void tscFreeRetrieveSup(SSqlObj *pSql) {
SRetrieveSupport *trsupport = pSql->param;
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
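/*
 * tscFreeRetrieveSup (now shared with doCleanupSubqueries) uses a
 * compare-and-swap so that concurrent completion paths free the retrieve
 * support at most once: whichever caller swaps pSql->param from trsupport to
 * NULL owns the cleanup. A minimal sketch of that idempotent-release shape,
 * assuming the atomic_val_compare_exchange_ptr primitive used here:
 */
static void releaseOnce(void **slot) {
  void *cur = *slot;
  if (cur == NULL) {
    return;  // already released by another path
  }
  if (atomic_val_compare_exchange_ptr(slot, cur, NULL) != cur) {
    return;  // lost the race; the winner performs the cleanup
  }
  // ... free the buffers hanging off cur, then cur itself ...
}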
@@ -2640,8 +2738,10 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
memcpy(trsupport, oriTrs, sizeof(*trsupport));
- const uint32_t nBufferSize = (1u << 16u); // 64KB
- trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
+ // the buffer size must match the one used in tscHandleMasterSTableQuery, which initialized the SColumnModel;
+ // the capacity member of SColumnModel is used to save trsupport->localBuffer in tscRetrieveFromDnodeCallBack
+ trsupport->localBuffer = (tFilePage *)calloc(1, oriTrs->localBufferSize);
+
if (trsupport->localBuffer == NULL) {
tscError("0x%"PRIx64" failed to malloc buffer for local buffer, reason:%s", pSql->self, strerror(errno));
tfree(trsupport);
@@ -2651,7 +2751,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
SSqlObj *pParentSql = trsupport->pParentSql;
int32_t subqueryIndex = trsupport->subqueryIndex;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]);
@@ -2673,7 +2773,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
return pParentSql->res.code;
}
- int32_t ret = tscProcessSql(pNew);
+ int32_t ret = tscBuildAndSendRequest(pNew, NULL);
*sent = 1;
@@ -2736,7 +2836,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d freed, not finished, total:%d", pParentSql->self, pSql, trsupport->subqueryIndex, pState->numOfSub);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d freed, not finished, total:%d", pParentSql->self,
+ pSql->self, trsupport->subqueryIndex, pState->numOfSub);
tscFreeRetrieveSup(pSql);
return;
@@ -2747,16 +2848,48 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
tstrerror(pParentSql->res.code));
// release allocated resource
- tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, trsupport->pFFColModel,
- pState->numOfSub);
-
+ tscDestroyGlobalMergerEnv(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, pState->numOfSub);
tscFreeRetrieveSup(pSql);
// in case of second stage join subquery, invoke its callback function instead of regular QueueAsyncRes
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
- (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
+
+ int32_t code = pParentSql->res.code;
+ SSqlObj *userSql = pParentSql->rootObj;
+
+ if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
+ if (userSql != pParentSql) {
+ tscFreeRetrieveSup(pParentSql);
+ }
+
+ tscFreeSubobj(userSql);
+ tfree(userSql->pSubs);
+
+ userSql->res.code = TSDB_CODE_SUCCESS;
+ userSql->retry++;
+
+ tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
+ tstrerror(code), userSql->retry);
+
+ tscResetSqlCmd(&userSql->cmd, true);
+ code = tsParseSql(userSql, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ return;
+ }
+
+ if (code != TSDB_CODE_SUCCESS) {
+ userSql->res.code = code;
+ tscAsyncResultOnError(userSql);
+ return;
+ }
+
+ pQueryInfo = tscGetQueryInfo(&userSql->cmd);
+ executeQuery(userSql, pQueryInfo);
+ } else {
+ (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
+ }
} else { // regular super table query
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscAsyncResultOnError(pParentSql);
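/*
 * The new branch above treats invalid table/vgroup ids as a sign of stale
 * cached metadata: instead of re-sending the failed subquery, it tears down
 * the subquery tree and re-parses from the root statement. A condensed sketch
 * of that flow, using the helpers visible in this diff (the
 * TSDB_CODE_TSC_ACTION_IN_PROGRESS case of tsParseSql is elided):
 */
static void retryFromRoot(SSqlObj *userSql, int32_t code) {
  if ((code != TSDB_CODE_TDB_INVALID_TABLE_ID && code != TSDB_CODE_VND_INVALID_VGROUP_ID) ||
      userSql->retry++ >= userSql->maxRetry) {
    return;  // not retryable; surface the error to the caller instead
  }

  tscFreeSubobj(userSql);               // drop the stale subquery objects
  tscResetSqlCmd(&userSql->cmd, true);  // clear the command and cached table meta

  if (tsParseSql(userSql, true) == TSDB_CODE_SUCCESS) {
    executeQuery(userSql, tscGetQueryInfo(&userSql->cmd));  // run the rebuilt plan
  }
}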
@@ -2765,20 +2898,23 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* pSql) {
+ if (trsupport->pExtMemBuffer == NULL) {
+ return;
+ }
int32_t idx = trsupport->subqueryIndex;
SSqlObj * pParentSql = trsupport->pParentSql;
tOrderDescriptor *pDesc = trsupport->pOrderDescriptor;
SSubqueryState* pState = &pParentSql->subState;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
// data from the current vnode is stored in cache and on disk
uint32_t numOfRowsFromSubquery = (uint32_t)(trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num);
SVgroupsInfo* vgroupsInfo = pTableMetaInfo->vgroupList;
- tscDebug("0x%"PRIx64" sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self, pSql,
- vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql->self,
+ pSql->self, vgroupsInfo->vgroups[0].epAddr[0].fqdn, vgroupsInfo->vgroups[0].vgId, numOfRowsFromSubquery, idx);
tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity);
@@ -2806,7 +2942,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
}
if (!subAndCheckDone(pSql, pParentSql, idx)) {
- tscDebug("0x%"PRIx64" sub:%p orderOfSub:%d freed, not finished", pParentSql->self, pSql, trsupport->subqueryIndex);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d freed, not finished", pParentSql->self, pSql->self,
+ trsupport->subqueryIndex);
tscFreeRetrieveSup(pSql);
return;
@@ -2818,20 +2955,50 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
tscDebug("0x%"PRIx64" retrieve from %d vnodes completed.final NumOfRows:%" PRId64 ",start to build loser tree",
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
- SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
- tscClearInterpInfo(pPQueryInfo);
+ SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
- tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
+ code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
+ pParentSql->res.code = code;
+
+ if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) {
+ pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty
+ } else {
+ pParentSql->cmd.command = TSDB_SQL_RETRIEVE_GLOBALMERGE;
+ }
+
+ tscCreateResPointerInfo(&pParentSql->res, pPQueryInfo);
+
tscDebug("0x%"PRIx64" build loser tree completed", pParentSql->self);
pParentSql->res.precision = pSql->res.precision;
pParentSql->res.numOfRows = 0;
pParentSql->res.row = 0;
-
+ pParentSql->res.numOfGroups = 0;
+
tscFreeRetrieveSup(pSql);
// the command flag must be set only after the semaphore has been correctly set.
- pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
+ if (pParentSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
+ pParentSql->cmd.command = TSDB_SQL_RETRIEVE_GLOBALMERGE;
+
+ SQueryInfo *pQueryInfo2 = tscGetQueryInfo(&pParentSql->cmd);
+
+ size_t size = tscNumOfExprs(pQueryInfo2);
+ for (int32_t j = 0; j < size; ++j) {
+ SExprInfo* pExprInfo = tscExprGet(pQueryInfo2, j);
+
+ int32_t functionId = pExprInfo->base.functionId;
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo2->pUdfInfo, -1 * functionId - 1);
+ code = initUdfInfo(pUdfInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ pParentSql->res.code = code;
+ tscAsyncResultOnError(pParentSql);
+ }
+ }
+ }
+ }
+
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
@@ -2856,7 +3023,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SSubqueryState* pState = &pParentSql->subState;
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
@@ -2894,16 +3061,16 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
}
SSqlRes * pRes = &pSql->res;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
if (numOfRows > 0) {
assert(pRes->numOfRows == numOfRows);
int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
- tscDebug("0x%"PRIx64" sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
- pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
+ pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
- if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfoDetail(&pParentSql->cmd, 0)->distinctTag)) {
+ if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
@@ -2946,15 +3113,16 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
SSqlObj *pNew = createSubqueryObj(pSql, table_index, tscRetrieveDataRes, trsupport, TSDB_SQL_SELECT, prevSqlObj);
if (pNew != NULL) { // the sub query of two-stage super table query
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pNew->cmd);
+ pNew->cmd.active = pQueryInfo;
pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
// clear the limit/offset info, since it should not be sent to vnode to be executed.
pQueryInfo->limit.limit = -1;
pQueryInfo->limit.offset = 0;
- assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1 && trsupport->subqueryIndex < pSql->subState.numOfSub);
+ assert(trsupport->subqueryIndex < pSql->subState.numOfSub);
// launch a subquery for each vnode, so the subquery index equals the vgroupIndex.
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
@@ -2980,10 +3148,10 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
SSqlObj* pParentSql = trsupport->pParentSql;
SSqlObj* pSql = (SSqlObj *) tres;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
- assert(pSql->cmd.numOfClause == 1 && pQueryInfo->numOfTables == 1);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ assert(pQueryInfo->numOfTables == 1);
- STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
+ STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex];
// stable query killed or other subquery failed, all query stopped
@@ -3006,17 +3174,16 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(code == taos_errno(pSql));
- if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
+ if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0;
-
tscReissueSubquery(trsupport, pSql, code, &sent);
if (sent) {
return;
}
} else {
- tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times or no need to retry, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
}
@@ -3082,13 +3249,14 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
pParentObj->res.code = pSql->res.code;
// set the flag in the parent sqlObj
- if (pSql->cmd.submitSchema) {
- pParentObj->cmd.submitSchema = 1;
+ if (pSql->cmd.insertParam.schemaAttached) {
+ pParentObj->cmd.insertParam.schemaAttached = 1;
}
}
-
+
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
- tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, pSupporter->index, pParentObj->subState.numOfSub);
+ // concurrency hazard: another thread may already have released pParentObj
+ //tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub);
return;
}
@@ -3118,8 +3286,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
// clean up tableMeta in cache
tscFreeQueryInfo(&pSql->cmd, false);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
- STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfoS(&pSql->cmd);
+ STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, 0);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
subquerySetState(pSql, &pParentObj->subState, i, 0);
@@ -3131,15 +3299,14 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
pParentObj->res.numOfRows, numOfFailed, numOfSub);
- tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable", pParentObj->self, pParentObj->cmd.numOfTables);
- for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
+ tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable before reparse sql", pParentObj->self, pParentObj->cmd.insertParam.numOfTables);
+ for(int32_t i = 0; i < pParentObj->cmd.insertParam.numOfTables; ++i) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ tNameExtractFullName(pParentObj->cmd.insertParam.pTableNameList[i], name);
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
- pParentObj->cmd.parseFinished = false;
-
+ pParentObj->res.code = TSDB_CODE_SUCCESS;
tscResetSqlCmd(&pParentObj->cmd, false);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
@@ -3157,7 +3324,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
return;
}
- tscDoQuery(pParentObj);
+ tscHandleMultivnodeInsert(pParentObj);
}
}
@@ -3173,7 +3340,7 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) {
SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param;
assert(pSupporter->index < pSupporter->pSql->subState.numOfSub);
- STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.pDataBlocks, pSupporter->index);
+ STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.insertParam.pDataBlocks, pSupporter->index);
int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if ((pRes->code = code)!= TSDB_CODE_SUCCESS) {
@@ -3181,7 +3348,7 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) {
return code; // here the pSql may have been released already.
}
- return tscProcessSql(pSql);
+ return tscBuildAndSendRequest(pSql, NULL);
}
int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
@@ -3190,13 +3357,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
// this is the retry path for a failed insert
if (pSql->pSubs != NULL) {
- int32_t blockNum = (int32_t)taosArrayGetSize(pCmd->pDataBlocks);
+ int32_t blockNum = (int32_t)taosArrayGetSize(pCmd->insertParam.pDataBlocks);
if (pSql->subState.numOfSub != blockNum) {
tscError("0x%"PRIx64" sub num:%d is not same with data block num:%d", pSql->self, pSql->subState.numOfSub, blockNum);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return pRes->code;
}
-
+
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter));
@@ -3204,7 +3371,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
pSup->pSql = pSql;
pSub->param = pSup;
- tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, i);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch sub insert, orderOfSub:%d", pSql->self, pSub->self, i);
if (pSub->res.code != TSDB_CODE_SUCCESS) {
tscHandleInsertRetry(pSql, pSub);
}
@@ -3213,7 +3380,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
- pSql->subState.numOfSub = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks);
+ pSql->subState.numOfSub = (uint16_t)taosArrayGetSize(pCmd->insertParam.pDataBlocks);
assert(pSql->subState.numOfSub > 0);
pRes->code = TSDB_CODE_SUCCESS;
@@ -3263,7 +3430,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
pNew->fetchFp = pNew->fp;
pSql->pSubs[numOfSub] = pNew;
- STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, numOfSub);
+ STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->insertParam.pDataBlocks, numOfSub);
pRes->code = tscCopyDataBlockToPayload(pNew, pTableDataBlock);
if (pRes->code == TSDB_CODE_SUCCESS) {
tscDebug("0x%"PRIx64" sub:%p create subObj success. orderOfSub:%d", pSql->self, pNew, numOfSub);
@@ -3281,13 +3448,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
goto _error;
}
- pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
// use the local variable
for (int32_t j = 0; j < numOfSub; ++j) {
SSqlObj *pSub = pSql->pSubs[j];
tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, j);
- tscProcessSql(pSub);
+ tscBuildAndSendRequest(pSub, NULL);
}
return TSDB_CODE_SUCCESS;
@@ -3297,21 +3464,23 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
}
static char* getResultBlockPosition(SSqlCmd* pCmd, SSqlRes* pRes, int32_t columnIndex, int16_t* bytes) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, columnIndex);
- assert(pInfo->pSqlExpr != NULL);
-
- *bytes = pInfo->pSqlExpr->resBytes;
- char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + pRes->row * (*bytes);
+ assert(pInfo->pExpr->pExpr == NULL);
- return pData;
+ *bytes = pInfo->pExpr->base.resBytes;
+ if (pRes->data != NULL) {
+ return pRes->data + pInfo->pExpr->base.offset * pRes->numOfRows + pRes->row * (*bytes);
+ } else {
+ return ((char*)pRes->urow[columnIndex]) + pRes->row * (*bytes);
+ }
}
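/*
 * getResultBlockPosition now handles both result layouts the client keeps:
 * pRes->data is column-blocked (all rows of a column stored contiguously,
 * each column located by its cumulative byte offset), while pRes->urow holds
 * one cursor per column. A sketch of the column-blocked addressing, assuming
 * fixed-width columns:
 */
static char *cellInColumnBlock(char *data, int32_t colByteOffset,
                               int32_t numOfRows, int32_t row, int16_t bytes) {
  // the column's block starts at data + colByteOffset * numOfRows;
  // row r sits bytes * r further into that block
  return data + colByteOffset * numOfRows + row * bytes;
}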
static void doBuildResFromSubqueries(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
int32_t numOfRes = INT32_MAX;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
@@ -3325,10 +3494,12 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
}
if (numOfRes == 0) { // no result any more, free all subquery objects
+ pSql->res.completed = true;
freeJoinSubqueryObj(pSql);
return;
}
+// tscRestoreFuncForSTableQuery(pQueryInfo);
int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList);
assert(numOfRes * rowSize > 0);
@@ -3348,7 +3519,20 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
int16_t bytes = 0;
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+ tscRestoreFuncForSTableQuery(pQueryInfo);
+ tscFieldInfoUpdateOffset(pQueryInfo);
+ for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
+ SSqlObj* pSub = pSql->pSubs[i];
+ if (pSub == NULL) {
+ continue;
+ }
+
+ SQueryInfo* pSubQueryInfo = pSub->cmd.pQueryInfo;
+ tscRestoreFuncForSTableQuery(pSubQueryInfo);
+ tscFieldInfoUpdateOffset(pSubQueryInfo);
+ }
+
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SColumnIndex* pIndex = &pRes->pColumnIndex[i];
SSqlRes* pRes1 = &pSql->pSubs[pIndex->tableIndex]->res;
@@ -3357,6 +3541,8 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
char* pData = getResultBlockPosition(pCmd1, pRes1, pIndex->columnIndex, &bytes);
memcpy(data, pData, bytes * numOfRes);
+ pRes->dataConverted = pRes1->dataConverted;
+
data += bytes * numOfRes;
}
@@ -3382,7 +3568,7 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
doArithmeticCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize);
pRes->data = pFilePage->data;
- tscSetResRawPtr(pRes, pQueryInfo);
+ tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted);
}
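/*
 * doBuildResFromSubqueries assembles join output row-aligned: each pass emits
 * numOfRes = min(rows still buffered in every live subquery) rows, then
 * memcpy()s numOfRes cells per output column from the owning subquery block,
 * so all columns advance in lockstep. Sketch of the min computation:
 */
static int32_t remainingJoinRows(SSqlObj **pSubs, int32_t numOfSub) {
  int32_t numOfRes = INT32_MAX;
  for (int32_t i = 0; i < numOfSub; ++i) {
    if (pSubs[i] == NULL) {
      continue;  // this subquery already completed and was freed
    }
    int32_t remain = (int32_t)(pSubs[i]->res.numOfRows - pSubs[i]->res.row);
    numOfRes = (remain < numOfRes) ? remain : numOfRes;
  }
  return numOfRes;
}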
void tscBuildResFromSubqueries(SSqlObj *pSql) {
@@ -3394,8 +3580,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
}
if (pRes->tsrow == NULL) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
- pRes->numOfCols = (int16_t) tscSqlExprNumOfExprs(pQueryInfo);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ pRes->numOfCols = (int16_t) tscNumOfExprs(pQueryInfo);
pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES);
pRes->urow = calloc(pRes->numOfCols, POINTER_BYTES);
@@ -3407,8 +3593,6 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
tscAsyncResultOnError(pSql);
return;
}
-
- tscRestoreFuncForSTableQuery(pQueryInfo);
}
assert (pRes->row >= pRes->numOfRows);
@@ -3424,18 +3608,18 @@ char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *) param;
int32_t index = -1;
- SSqlExpr* pExpr = NULL;
+ SExprInfo* pExpr = NULL;
for (int32_t i = 0; i < pSupport->numOfCols; ++i) {
pExpr = taosArrayGetP(pSupport->exprList, i);
- if (strncmp(name, pExpr->aliasName, sizeof(pExpr->aliasName) - 1) == 0) {
+ if (strncmp(name, pExpr->base.aliasName, sizeof(pExpr->base.aliasName) - 1) == 0) {
index = i;
break;
}
}
assert(index >= 0 && index < pSupport->numOfCols);
- return pSupport->data[index] + pSupport->offset * pExpr->resBytes;
+ return pSupport->data[index] + pSupport->offset * pExpr->base.resBytes;
}
TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
@@ -3448,23 +3632,29 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
return pRes->tsrow;
}
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
size_t size = tscNumOfFields(pQueryInfo);
+
+ int32_t j = 0;
for (int i = 0; i < size; ++i) {
SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
+ if (!pInfo->visible) {
+ continue;
+ }
int32_t type = pInfo->field.type;
int32_t bytes = pInfo->field.bytes;
if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) {
- pRes->tsrow[i] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i];
+ pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i];
} else {
- pRes->tsrow[i] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]);
- pRes->length[i] = varDataLen(pRes->urow[i]);
+ pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]);
+ pRes->length[j] = varDataLen(pRes->urow[i]);
}
((char**) pRes->urow)[i] += bytes;
+ j += 1;
}
pRes->row++; // index increase one-step
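/*
 * The i/j pair introduced above compacts hidden internal fields out of the
 * user-visible row: i walks every internal field (urow keeps one cursor per
 * field), while j advances only for visible fields (tsrow/length are what the
 * application sees). A sketch of the compaction over assumed field metadata:
 */
static void compactVisibleRow(void **tsrow, void **urow,
                              const SInternalField *fields, int32_t numOfFields) {
  int32_t j = 0;
  for (int32_t i = 0; i < numOfFields; ++i) {
    if (!fields[i].visible) {
      continue;  // hidden field contributes no user-facing column
    }
    tsrow[j++] = urow[i];  // visible slot j is fed from internal slot i
  }
}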
@@ -3475,23 +3665,23 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
bool hasData = true;
SSqlCmd *pCmd = &pSql->cmd;
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
bool allSubqueryExhausted = true;
-
+
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}
-
+
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd;
-
- SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex);
+
+ SQueryInfo *pQueryInfo1 = tscGetQueryInfo(pCmd1);
assert(pQueryInfo1->numOfTables == 1);
-
+
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0);
-
+
/*
* if the global limit has not been reached, and the current result is not exhausted or more vnodes are
* available, keep going
@@ -3502,19 +3692,20 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
break;
}
}
-
+
hasData = !allSubqueryExhausted;
} else { // otherwise, in case of inner join, if any subquery is exhausted, the query is completed.
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
if (pSql->pSubs[i] == 0) {
continue;
}
-
+
SSqlRes * pRes1 = &pSql->pSubs[i]->res;
- SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
+ SQueryInfo *pQueryInfo1 = tscGetQueryInfo(&pSql->pSubs[i]->cmd);
if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) &&
- tscIsProjectionQuery(pQueryInfo1)) || (pRes1->numOfRows == 0)) {
+ tscIsProjectionQuery(pQueryInfo1)) ||
+ (pRes1->numOfRows == 0)) {
hasData = false;
break;
}
@@ -3523,3 +3714,122 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
+
+void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
+ char* sql, void* merger, int32_t stage, uint64_t qId) {
+ assert(pQueryInfo != NULL);
+ SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
+ if (pQInfo == NULL) {
+ goto _cleanup;
+ }
+
+ // make sure third parties won't overwrite this structure
+ pQInfo->signature = pQInfo;
+ pQInfo->qId = qId;
+ SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
+ SQueryAttr *pQueryAttr = &pQInfo->query;
+
+ pRuntimeEnv->pQueryAttr = pQueryAttr;
+ tscCreateQueryFromQueryInfo(pQueryInfo, pQueryAttr, NULL);
+
+ pQueryAttr->tableGroupInfo = *pTableGroupInfo;
+
+ // calculate the result row size
+ SExprInfo* pEx = NULL;
+ int32_t num = 0;
+ if (pQueryAttr->pExpr3 != NULL) {
+ pEx = pQueryAttr->pExpr3;
+ num = pQueryAttr->numOfExpr3;
+ } else if (pQueryAttr->pExpr2 != NULL) {
+ pEx = pQueryAttr->pExpr2;
+ num = pQueryAttr->numOfExpr2;
+ } else {
+ pEx = pQueryAttr->pExpr1;
+ num = pQueryAttr->numOfOutput;
+ }
+
+ for (int16_t col = 0; col < num; ++col) {
+ pQueryAttr->resultRowSize += pEx[col].base.resBytes;
+
+ // keep the tag length
+ if (TSDB_COL_IS_TAG(pEx[col].base.colInfo.flag)) {
+ pQueryAttr->tagLen += pEx[col].base.resBytes;
+ }
+ }
+
+ size_t numOfGroups = 0;
+ if (pTableGroupInfo->pGroupList != NULL) {
+ numOfGroups = taosArrayGetSize(pTableGroupInfo->pGroupList);
+ STableGroupInfo* pTableqinfo = &pQInfo->runtimeEnv.tableqinfoGroupInfo;
+
+ pTableqinfo->pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES);
+ pTableqinfo->numOfTables = pTableGroupInfo->numOfTables;
+ pTableqinfo->map = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+ }
+
+ pQInfo->pBuf = calloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
+ if (pQInfo->pBuf == NULL) {
+ goto _cleanup;
+ }
+
+ pQInfo->dataReady = QUERY_RESULT_NOT_READY;
+ pQInfo->rspContext = NULL;
+ pQInfo->sql = sql;
+
+ pthread_mutex_init(&pQInfo->lock, NULL);
+ tsem_init(&pQInfo->ready, 0, 0);
+
+ int32_t index = 0;
+ for(int32_t i = 0; i < numOfGroups; ++i) {
+ SArray* pa = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i);
+
+ size_t s = taosArrayGetSize(pa);
+ SArray* p1 = taosArrayInit(s, POINTER_BYTES);
+ if (p1 == NULL) {
+ goto _cleanup;
+ }
+
+ taosArrayPush(pRuntimeEnv->tableqinfoGroupInfo.pGroupList, &p1);
+
+ STimeWindow window = pQueryAttr->window;
+ for(int32_t j = 0; j < s; ++j) {
+ STableKeyInfo* info = taosArrayGet(pa, j);
+ window.skey = info->lastKey;
+
+ void* buf = (char*) pQInfo->pBuf + index * sizeof(STableQueryInfo);
+ STableQueryInfo* item = createTableQueryInfo(pQueryAttr, info->pTable, pQueryAttr->groupbyColumn, window, buf);
+ if (item == NULL) {
+ goto _cleanup;
+ }
+
+ item->groupIndex = i;
+ taosArrayPush(p1, &item);
+
+ STableId id = {.tid = 0, .uid = 0};
+ taosHashPut(pRuntimeEnv->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
+ index += 1;
+ }
+ }
+
+ // todo refactor: filter should not be applied here.
+ createFilterInfo(pQueryAttr, 0);
+
+ SArray* pa = NULL;
+ if (stage == MASTER_SCAN) {
+ pQueryAttr->createFilterOperator = false; // not needed for the parent query
+ pa = createExecOperatorPlan(pQueryAttr);
+ } else {
+ pa = createGlobalMergePlan(pQueryAttr);
+ }
+
+ STsBufInfo bufInfo = {0};
+ SQueryParam param = {.pOperator = pa};
+ /*int32_t code = */initQInfo(&bufInfo, NULL, pSourceOperator, pQInfo, &param, NULL, 0, merger);
+ taosArrayDestroy(pa);
+
+ return pQInfo;
+
+ _cleanup:
+ freeQInfo(pQInfo);
+ return NULL;
+}
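+
+// Illustrative usage sketch (not part of the change itself): this mirrors the call made
+// from handleDownstreamOperator in tscUtil.c later in this patch. MASTER_SCAN selects the
+// regular execution plan; any other stage falls back to the global merge plan. Variable
+// names below are assumptions for illustration only.
+//
+//   px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator,
+//                                         NULL /*sql*/, NULL /*merger*/, MASTER_SCAN, pSql->self);
+//   if (px->pQInfo == NULL) {
+//     /* allocation or setup failed; partial state was released via _cleanup */
+//   }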
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index e88b5b20347a117e8606b6c9eefdcf32a7f55bb5..8af340030cccee1431a82eb88344642011f2e019 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -19,15 +19,13 @@
#include "trpc.h"
#include "tnote.h"
#include "ttimer.h"
-#include "tutil.h"
#include "tsched.h"
#include "tscLog.h"
-#include "tscUtil.h"
#include "tsclient.h"
#include "tglobal.h"
#include "tconfig.h"
#include "ttimezone.h"
-#include "tlocale.h"
+#include "qScript.h"
// global, not configurable
#define TSC_VAR_NOT_RELEASE 1
@@ -35,8 +33,10 @@
int32_t sentinel = TSC_VAR_NOT_RELEASE;
-SHashObj *tscVgroupMap; // hash map to keep the global vgroup info
-SHashObj *tscTableMetaInfo; // table meta info
+SHashObj *tscVgroupMap; // hash map to keep the vgroup info from mnode
+SHashObj *tscTableMetaMap; // table meta info buffer
+SCacheObj *tscVgroupListBuf; // super table vgroup list cache; each entry survives only 5 seconds
+
int32_t tscObjRef = -1;
void *tscTmr;
void *tscQhandle;
@@ -44,17 +44,21 @@ int32_t tscRefId = -1;
int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
void *tscRpcCache; // cache to keep rpc obj
-int32_t tscNumOfThreads = 1; // num of rpc threads
-char tscLogFileName[12] = "taoslog";
-int tscLogFileNum = 10;
-static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
-static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
+int32_t tscNumOfThreads = 1; // num of rpc threads
+char tscLogFileName[12] = "taoslog";
+int tscLogFileNum = 10;
+
+static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
+static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
+
+// pthread_once cannot return a result code, so the result is stored in a global variable.
static volatile int tscInitRes = 0;
void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) {
taosGetDisk();
taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
+
void tscFreeRpcObj(void *param) {
assert(param);
SRpcObj *pRpcObj = (SRpcObj *)(param);
@@ -66,10 +70,9 @@ void tscReleaseRpc(void *param) {
if (param == NULL) {
return;
}
- pthread_mutex_lock(&rpcObjMutex);
- taosCacheRelease(tscRpcCache, (void *)&param, false);
- pthread_mutex_unlock(&rpcObjMutex);
-}
+
+ taosCacheRelease(tscRpcCache, (void *)&param, false);
+}
int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncrypt, void **ppRpcObj) {
pthread_mutex_lock(&rpcObjMutex);
@@ -79,7 +82,7 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
*ppRpcObj = pRpcObj;
pthread_mutex_unlock(&rpcObjMutex);
return 0;
- }
+ }
SRpcInit rpcInit;
memset(&rpcInit, 0, sizeof(rpcInit));
@@ -103,7 +106,8 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
pthread_mutex_unlock(&rpcObjMutex);
tscError("failed to init connection to TDengine");
return -1;
- }
+ }
+
pRpcObj = taosCachePut(tscRpcCache, rpcObj.key, strlen(rpcObj.key), &rpcObj, sizeof(rpcObj), 1000*5);
if (pRpcObj == NULL) {
rpcClose(rpcObj.pDnodeConn);
@@ -117,7 +121,11 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
}
void taos_init_imp(void) {
- char temp[128] = {0};
+ char temp[128] = {0};
+
+ // taos_cleanup is not yet available in the APIs of some other programming languages.
+ // Register it with atexit() to make sure allocated resources are released, which also suppresses the valgrind warning.
+ atexit(taos_cleanup);
errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec());
@@ -148,36 +156,43 @@ void taos_init_imp(void) {
taosInitNotes();
rpcInit();
+
+ scriptEnvPoolInit();
+
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
taosSetCoreDump();
tscInitMsgsFp();
- int queueSize = tsMaxConnections*2;
double factor = (tscEmbedded == 0)? 2.0:4.0;
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
if (tscNumOfThreads < 2) {
tscNumOfThreads = 2;
}
+
+ int32_t queueSize = tsMaxConnections*2;
tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc");
if (NULL == tscQhandle) {
- tscError("failed to init scheduler");
+ tscError("failed to init task queue");
tscInitRes = -1;
return;
}
+ tscDebug("client task queue is initialized, numOfWorkers: %d", tscNumOfThreads);
+
tscTmr = taosTmrInit(tsMaxConnections * 2, 200, 60000, "TSC");
if(0 == tscEmbedded){
taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
- if (tscTableMetaInfo == NULL) {
- tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
- tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
- tscTableMetaInfo = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- tscDebug("TableMeta:%p", tscTableMetaInfo);
+ if (tscTableMetaMap == NULL) {
+ tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
+ tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+ tscTableMetaMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
+ tscVgroupListBuf = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, NULL, "stable-vgroup-list");
+ tscDebug("TableMeta:%p, vgroup:%p is initialized", tscTableMetaMap, tscVgroupMap);
}
int refreshTime = 5;
@@ -186,14 +201,13 @@ void taos_init_imp(void) {
tscRefId = taosOpenRef(200, tscCloseTscObj);
- // in other language APIs, taos_cleanup is not available yet.
- // So, to make sure taos_cleanup will be invoked to clean up the allocated
- // resource to suppress the valgrind warning.
- atexit(taos_cleanup);
tscDebug("client is initialized successfully");
}
-int taos_init() { pthread_once(&tscinit, taos_init_imp); return tscInitRes;}
+int taos_init() {
+ pthread_once(&tscinit, taos_init_imp);
+ return tscInitRes;
+}
// this function may be called by user or system, or by both simultaneously.
void taos_cleanup(void) {
@@ -203,8 +217,12 @@ void taos_cleanup(void) {
return;
}
- taosHashCleanup(tscTableMetaInfo);
- tscTableMetaInfo = NULL;
+ if (tscEmbedded == 0) {
+ scriptEnvPoolCleanup();
+ }
+
+ taosHashCleanup(tscTableMetaMap);
+ tscTableMetaMap = NULL;
taosHashCleanup(tscVgroupMap);
tscVgroupMap = NULL;
@@ -231,6 +249,9 @@ void taos_cleanup(void) {
pthread_mutex_destroy(&rpcObjMutex);
}
+ taosCacheCleanup(tscVgroupListBuf);
+ tscVgroupListBuf = NULL;
+
if (tscEmbedded == 0) {
rpcCleanup();
taosCloseLog();
@@ -290,16 +311,24 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
if (strlen(tsLocale) == 0) { // locale does not set yet
char* defaultLocale = setlocale(LC_CTYPE, "");
+
+ // The locale of the current OS is not set correctly, so the default locale cannot be acquired.
+ // The current system will abort during launch.
+ if (defaultLocale == NULL) {
+ tscError("failed to get default locale, please set the correct locale in current OS");
+ return -1;
+ }
+
tstrncpy(tsLocale, defaultLocale, TSDB_LOCALE_LEN);
}
// set the user specified locale
char *locale = setlocale(LC_CTYPE, pStr);
- if (locale != NULL) {
+ if (locale != NULL) { // the user-specified locale was set successfully
tscInfo("locale set, prev locale:%s, new locale:%s", tsLocale, locale);
cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
- } else { // set the user-specified localed failed, use default LC_CTYPE as current locale
+ } else { // failed to set the user-specified locale, fall back to the default LC_CTYPE
locale = setlocale(LC_CTYPE, tsLocale);
tscInfo("failed to set locale:%s, current locale:%s", pStr, tsLocale);
}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 5e59f54c8898d61b64109ed699d1431723553878..a71ed588dba32ffae12408b289bda21dea112f8e 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -16,21 +16,98 @@
#include "tscUtil.h"
#include "hash.h"
#include "os.h"
-#include "texpr.h"
#include "taosmsg.h"
+#include "texpr.h"
#include "tkey.h"
#include "tmd5.h"
-#include "tscLocalMerge.h"
+#include "tscGlobalmerge.h"
#include "tscLog.h"
#include "tscProfile.h"
#include "tscSubquery.h"
-#include "tschemautil.h"
+#include "tsched.h"
+#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "ttokendef.h"
+#include "httpInt.h"
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
-static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
+
+int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len) {
+ int32_t n = 0;
+
+ switch (type) {
+ case TSDB_DATA_TYPE_NULL:
+ n = sprintf(str, "null");
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ n = sprintf(str, (*(int8_t*)buf) ? "true" : "false");
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ n = sprintf(str, "%d", *(int8_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ n = sprintf(str, "%d", *(int16_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ n = sprintf(str, "%d", *(int32_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ n = sprintf(str, "%" PRId64, *(int64_t*)buf);
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ n = sprintf(str, "%d", *(uint8_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_USMALLINT:
+ n = sprintf(str, "%d", *(uint16_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_UINT:
+ n = sprintf(str, "%d", *(uint32_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_UBIGINT:
+ n = sprintf(str, "%" PRId64, *(uint64_t*)buf);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ n = sprintf(str, "%f", GET_FLOAT_VAL(buf));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ n = sprintf(str, "%f", GET_DOUBLE_VAL(buf));
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ if (bufSize < 0) {
+ tscError("invalid buf size");
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ *str = '"';
+ memcpy(str + 1, buf, bufSize);
+ *(str + bufSize + 1) = '"';
+ n = bufSize + 2;
+ break;
+
+ default:
+ tscError("unsupported type:%d", type);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+
+ *len = n;
+
+ return TSDB_CODE_SUCCESS;
+}
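+
+// Example (a sketch; buffer size and values are assumptions): converting a BIGINT and a
+// BINARY value. For BINARY/NCHAR the result is wrapped in double quotes and bufSize must
+// be the value's length; for fixed-size types bufSize is unused.
+//
+//   char out[64]; int32_t len = 0;
+//   int64_t v = 42;
+//   converToStr(out, TSDB_DATA_TYPE_BIGINT, &v, 0, &len);      // out holds 42, len == 2
+//   converToStr(out, TSDB_DATA_TYPE_BINARY, "abc", 3, &len);   // out holds "abc", len == 5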
+
static void tscStrToLower(char *str, int32_t n) {
if (str == NULL || n <= 0) { return;}
@@ -78,14 +155,14 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) {
}
bool tscQueryTags(SQueryInfo* pQueryInfo) {
- int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfCols = (int32_t) tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- int32_t functId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int32_t functId = pExpr->base.functionId;
// "select count(tbname)" query
- if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
+ if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
continue;
}
@@ -98,13 +175,12 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
}
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo) {
- int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfCols = (int32_t) tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- int32_t functId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ int32_t functId = pExpr->base.functionId;
- // "select count(tbname)" query
if (functId == TSDB_FUNC_BLKINFO) {
return true;
}
@@ -147,14 +223,23 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
* 1. failed to get tableMeta from server; 2. not a super table; 3. limitation is 0;
* 4. show queries, instead of a select query
*/
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
if (pTableMetaInfo == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) ||
pQueryInfo->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || numOfExprs == 0) {
return false;
}
for (int32_t i = 0; i < numOfExprs; ++i) {
- int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
+ int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
+
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
+ if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
+ return false;
+ }
+
+ continue;
+ }
if (functionId != TSDB_FUNC_PRJ &&
functionId != TSDB_FUNC_TAGPRJ &&
@@ -162,6 +247,9 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
functionId != TSDB_FUNC_TS &&
functionId != TSDB_FUNC_ARITHM &&
functionId != TSDB_FUNC_TS_COMP &&
+ functionId != TSDB_FUNC_DIFF &&
+ functionId != TSDB_FUNC_DERIVATIVE &&
+ functionId != TSDB_FUNC_TS_DUMMY &&
functionId != TSDB_FUNC_TID_TAG) {
return false;
}
@@ -190,13 +278,17 @@ bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableInde
}
bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
+ int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
+ if (f == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
- if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
- functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
+ if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG &&
+ f != TSDB_FUNC_TS && f != TSDB_FUNC_ARITHM && f != TSDB_FUNC_DIFF &&
+ f != TSDB_FUNC_DERIVATIVE) {
return false;
}
}
@@ -204,14 +296,49 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
return true;
}
+bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
+ size_t size = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < size; ++i) {
+ int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
+ if (f == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
+ // filter on primary timestamp column
+ if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) {
+ return true;
+ }
+
+ size_t size = taosArrayGetSize(pQueryInfo->colList);
+ for (int32_t i = 0; i < size; ++i) {
+ SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
+ if (pCol->info.flist.numOfFilters > 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
- size_t size = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
assert(pExpr != NULL);
- int32_t functionId = pExpr->functionId;
- if (functionId == TSDB_FUNC_TAG) {
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) {
continue;
}
@@ -223,19 +350,23 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}
-bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) {
+bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) {
if (tscIsProjectionQuery(pQueryInfo)) {
return false;
}
size_t numOfOutput = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfOutput; ++i) {
- SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo;
- if (pExprInfo != NULL) {
+ SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pExpr;
+ if (pExprInfo->pExpr != NULL) {
return true;
}
}
return false;
}
@@ -243,7 +374,7 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
- SSqlGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
+ SGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
if (!TSDB_COL_IS_TAG(pIndex->flag) && pIndex->colIndex < numOfCols) { // group by normal columns
@@ -254,16 +385,124 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TS) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TS) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool isTsCompQuery(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ SExprInfo* pExpr1 = tscExprGet(pQueryInfo, 0);
+ if (numOfExprs != 1) {
+ return false;
+ }
+
+ return pExpr1->base.functionId == TSDB_FUNC_TS_COMP;
+}
+
+bool hasTagValOutput(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ SExprInfo* pExpr1 = tscExprGet(pQueryInfo, 0);
+
+ if (numOfExprs == 1 && pExpr1->base.functionId == TSDB_FUNC_TS_COMP) {
+ return true;
+ }
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ // the ts_comp column requires the tag value for the join filter
+ if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool isStabledev(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId == TSDB_FUNC_STDDEV_DST) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
- int32_t functionId = pExpr->functionId;
- if (functionId == TSDB_FUNC_TWA) {
+ if (pExpr->base.functionId == TSDB_FUNC_TWA) {
return true;
}
}
@@ -271,16 +510,15 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
return false;
}
-bool tscIsTopbotQuery(SQueryInfo* pQueryInfo) {
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+bool tscIsIrateQuery(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
- int32_t functionId = pExpr->functionId;
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
+ if (pExpr->base.functionId == TSDB_FUNC_IRATE) {
return true;
}
}
@@ -288,23 +526,83 @@ bool tscIsTopbotQuery(SQueryInfo* pQueryInfo) {
return false;
}
-int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo) {
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo) {
+ return pQueryInfo->sessionWindow.gap > 0;
+}
+
+bool tscNeedReverseScan(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
- int32_t functionId = pExpr->functionId;
- if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
- return (int32_t) pExpr->param[0].i64;
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) {
+ continue;
+ }
+
+ if ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
+ return true;
+ }
+
+ if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) {
+ // the scan order to acquire the last result of the specified column
+ int32_t order = (int32_t)pExpr->base.param[0].i64;
+ if (order != pQueryInfo->order.order) {
+ return true;
+ }
}
}
- return 0;
+ return false;
+}
+
+bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
+ if (pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0) {
+ return false;
+ }
+
+ if (tscIsDiffDerivQuery(pQueryInfo)) {
+ return false;
+ }
+
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
+ if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) {
+ return true;
+ }
+
+ continue;
+ }
+
+ if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ if ((!IS_MULTIOUTPUT(aAggs[functionId].status)) ||
+ (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TS_COMP)) {
+ return true;
+ }
+ }
+
+ return false;
}
+bool isBlockDistQuery(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ return (numOfExprs == 1 && pExpr->base.functionId == TSDB_FUNC_BLKINFO);
+}
void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
if (!tscIsPointInterpQuery(pQueryInfo)) {
@@ -316,9 +614,9 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
}
int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
- if (pRes->tsrow == NULL) {
- pRes->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
+ pRes->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
+ if (pRes->tsrow == NULL) {
pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES);
pRes->urow = calloc(pRes->numOfCols, POINTER_BYTES);
pRes->length = calloc(pRes->numOfCols, sizeof(int32_t));
@@ -339,11 +637,72 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
- assert(pRes->numOfCols > 0);
+static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bool convertNchar) {
+ // generated the user-defined column result
+ if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.colInfo.flag)) {
+ if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) {
+ setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
+ } else {
+ if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) {
+ assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes);
- int32_t offset = 0;
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
+
+ memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen);
+ varDataSetLen(p, pInfo->pExpr->base.param[1].nLen);
+ }
+ } else {
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
+ memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes);
+ }
+ }
+ }
+ } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
+ // convert unicode to the native encoding in a temporary buffer, with an extra byte for the terminating symbol
+ char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+ if (buffer == NULL) {
+ return;
+ }
+ pRes->buffer[i] = buffer;
+ // zero the buffer so that binary strings are always terminated
+ memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
+
+ char* p = pRes->urow[i];
+ for (int32_t k = 0; k < pRes->numOfRows; ++k) {
+ char* dst = pRes->buffer[i] + k * pInfo->field.bytes;
+
+ if (isNull(p, TSDB_DATA_TYPE_NCHAR)) {
+ memcpy(dst, p, varDataTLen(p));
+ } else if (varDataLen(p) > 0) {
+ int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
+ varDataSetLen(dst, length);
+
+ if (length == 0) {
+ tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
+ }
+ } else {
+ varDataSetLen(dst, 0);
+ }
+
+ p += pInfo->field.bytes;
+ }
+
+ memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+ }
+
+ if (convertNchar) {
+ pRes->dataConverted = true;
+ }
+}
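+
+// Note on the NCHAR branch above: taosUcs4ToMbs() writes the converted string into
+// pRes->buffer[i], and the converted rows are then copied back over pRes->urow[i], so
+// downstream readers always see data in the local charset. A returned length of 0 marks
+// a failed conversion; it is logged and the row length set to 0 rather than treated as fatal.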
+
+void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted) {
+ assert(pRes->numOfCols > 0);
+ if (pRes->numOfRows == 0) {
+ return;
+ }
+ int32_t offset = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
@@ -351,30 +710,46 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
pRes->length[i] = pInfo->field.bytes;
offset += pInfo->field.bytes;
+ setResRawPtrImpl(pRes, pInfo, i, converted ? false : true);
+ }
+}
+
+void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar) {
+ assert(pRes->numOfCols > 0);
+
+ for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
+ SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
+
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
+ pRes->urow[i] = pColData->pData;
+ pRes->length[i] = pInfo->field.bytes;
+
+ setResRawPtrImpl(pRes, pInfo, i, convertNchar);
+ /*
// generated the user-defined column result
- if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
- if (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) {
+ if (pInfo->pExpr->pExpr == NULL && TSDB_COL_IS_UD_COL(pInfo->pExpr->base.ColName.flag)) {
+ if (pInfo->pExpr->base.param[1].nType == TSDB_DATA_TYPE_NULL) {
setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
} else {
if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) {
- assert(pInfo->pSqlExpr->param[1].nLen <= pInfo->field.bytes);
+ assert(pInfo->pExpr->base.param[1].nLen <= pInfo->field.bytes);
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
- memcpy(varDataVal(p), pInfo->pSqlExpr->param[1].pz, pInfo->pSqlExpr->param[1].nLen);
- varDataSetLen(p, pInfo->pSqlExpr->param[1].nLen);
+ memcpy(varDataVal(p), pInfo->pExpr->base.param[1].pz, pInfo->pExpr->base.param[1].nLen);
+ varDataSetLen(p, pInfo->pExpr->base.param[1].nLen);
}
} else {
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
- memcpy(p, &pInfo->pSqlExpr->param[1].i64, pInfo->field.bytes);
+ memcpy(p, &pInfo->pExpr->base.param[1].i64, pInfo->field.bytes);
}
}
}
- } else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
+ } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
@@ -402,19 +777,551 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
}
memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+ }*/
+ }
+}
+
+static SColumnInfo* extractColumnInfoFromResult(SArray* pTableCols) {
+ int32_t numOfCols = (int32_t) taosArrayGetSize(pTableCols);
+ SColumnInfo* pColInfo = calloc(numOfCols, sizeof(SColumnInfo));
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SColumn* pCol = taosArrayGetP(pTableCols, i);
+ pColInfo[i] = pCol->info;
+ }
+
+ return pColInfo;
+}
+
+typedef struct SDummyInputInfo {
+ SSDataBlock *block;
+ STableQueryInfo *pTableQueryInfo;
+ SSqlObj *pSql; // refactor: remove it
+ int32_t numOfFilterCols;
+ SSingleColumnFilterInfo *pFilterInfo;
+} SDummyInputInfo;
+
+typedef struct SJoinStatus {
+ SSDataBlock* pBlock; // point to the upstream block
+ int32_t index;
+ bool completed; // whether the current upstream is completed
+} SJoinStatus;
+
+typedef struct SJoinOperatorInfo {
+ SSDataBlock *pRes;
+ SJoinStatus *status;
+ int32_t numOfUpstream;
+ SRspResultInfo resultInfo; // todo refactor, add this info for each operator
+} SJoinOperatorInfo;
+
+static void converNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t rows, bool *gotNchar) {
+ for (int32_t i = 0; i < numOfFilterCols; ++i) {
+ if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+ pFilterInfo[i].pData2 = pFilterInfo[i].pData;
+ pFilterInfo[i].pData = malloc(rows * pFilterInfo[i].info.bytes);
+ int32_t bufSize = pFilterInfo[i].info.bytes - VARSTR_HEADER_SIZE;
+ for (int32_t j = 0; j < rows; ++j) {
+ char* dst = (char *)pFilterInfo[i].pData + j * pFilterInfo[i].info.bytes;
+ char* src = (char *)pFilterInfo[i].pData2 + j * pFilterInfo[i].info.bytes;
+ int32_t len = 0;
+ taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
+ varDataLen(dst) = len;
+ }
+ *gotNchar = true;
+ }
+ }
+}
+
+static void freeNcharFilterColumn(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+ for (int32_t i = 0; i < numOfFilterCols; ++i) {
+ if (pFilterInfo[i].info.type == TSDB_DATA_TYPE_NCHAR) {
+ if (pFilterInfo[i].pData2) {
+ tfree(pFilterInfo[i].pData);
+ pFilterInfo[i].pData = pFilterInfo[i].pData2;
+ pFilterInfo[i].pData2 = NULL;
+ }
+ }
+ }
+}
+
+static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+ int32_t offset = 0;
+ char* pData = pRes->data;
+
+ for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
+ if (pData != NULL) {
+ pColData->pData = pData + offset * pBlock->info.rows;
+ } else {
+ pColData->pData = pRes->urow[i];
+ }
+
+ offset += pColData->info.bytes;
+ }
+
+ // filter data if needed
+ if (numOfFilterCols > 0) {
+ doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
+ bool gotNchar = false;
+ converNcharFilterColumn(pFilterInfo, numOfFilterCols, pBlock->info.rows, &gotNchar);
+ int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
+ bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
+ if (gotNchar) {
+ freeNcharFilterColumn(pFilterInfo, numOfFilterCols);
+ }
+ if (!all) {
+ doCompactSDataBlock(pBlock, pBlock->info.rows, p);
+ }
+
+ tfree(p);
+ }
+
+ // todo refactor: extract method
+ // set the timestamp range of current result data block
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, 0);
+ if (pColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+ pBlock->info.window.skey = ((int64_t*)pColData->pData)[0];
+ pBlock->info.window.ekey = ((int64_t*)pColData->pData)[pBlock->info.rows-1];
+ }
+
+ pRes->numOfRows = 0;
+}
+
+// NOTE: data blocks may already exist before this function is called.
+SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
+ SOperatorInfo *pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SDummyInputInfo *pInput = pOperator->info;
+ SSqlObj* pSql = pInput->pSql;
+ SSqlRes* pRes = &pSql->res;
+
+ SSDataBlock* pBlock = pInput->block;
+ if (pOperator->pRuntimeEnv != NULL) {
+ pOperator->pRuntimeEnv->current = pInput->pTableQueryInfo;
+ }
+
+ pBlock->info.rows = pRes->numOfRows;
+ if (pRes->numOfRows != 0) {
+ doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
+ *newgroup = false;
+ return pBlock;
+ }
+
+ // No data block is left, so retrieve the next batch and transfer it into an SSDataBlock
+ TAOS_ROW pRow = NULL;
+ taos_fetch_block(pSql, &pRow);
+
+ if (pRes->numOfRows == 0) {
+ pOperator->status = OP_EXEC_DONE;
+ return NULL;
+ }
+
+ pBlock->info.rows = pRes->numOfRows;
+ doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo, pInput->numOfFilterCols);
+ *newgroup = false;
+ return pBlock;
+}
+
+static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) {
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+
+ for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
+ SJoinStatus* pStatus = &pJoinInfo->status[i];
+ if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
+ tscDebug("Retrieve nest query result, index:%d, total:%d", i, pOperator->numOfUpstream);
+
+ publishOperatorProfEvent(pOperator->upstream[i], QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[i], QUERY_PROF_AFTER_OPERATOR_EXEC);
+ pStatus->index = 0;
+
+ if (pStatus->pBlock == NULL) {
+ pOperator->status = OP_EXEC_DONE;
+ pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
+ break;
+ }
+ }
+ }
+}
+
+SSDataBlock* doDataBlockJoin(void* param, bool* newgroup) {
+ SOperatorInfo *pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ assert(pOperator->numOfUpstream > 1);
+
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+ pJoinInfo->pRes->info.rows = 0;
+
+ while(1) {
+ fetchNextBlockIfCompleted(pOperator, newgroup);
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pJoinInfo->pRes;
+ }
+
+ SJoinStatus* st0 = &pJoinInfo->status[0];
+ SColumnInfoData* p0 = taosArrayGet(st0->pBlock->pDataBlock, 0);
+ int64_t* ts0 = (int64_t*) p0->pData;
+
+ bool prefixEqual = true;
+
+ while(1) {
+ prefixEqual = true;
+ for (int32_t i = 1; i < pJoinInfo->numOfUpstream; ++i) {
+ SJoinStatus* st = &pJoinInfo->status[i];
+
+ SColumnInfoData* p = taosArrayGet(st->pBlock->pDataBlock, 0);
+ int64_t* ts = (int64_t*)p->pData;
+
+ if (ts[st->index] < ts0[st0->index]) { // less than the first
+ prefixEqual = false;
+
+ if ((++(st->index)) >= st->pBlock->info.rows) {
+ fetchNextBlockIfCompleted(pOperator, newgroup);
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pJoinInfo->pRes;
+ }
+ }
+ } else if (ts[st->index] > ts0[st0->index]) { // greater than the first;
+ if (prefixEqual == true) {
+ prefixEqual = false;
+ for (int32_t j = 0; j < i; ++j) {
+ SJoinStatus* stx = &pJoinInfo->status[j];
+ if ((++(stx->index)) >= stx->pBlock->info.rows) {
+
+ fetchNextBlockIfCompleted(pOperator, newgroup);
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pJoinInfo->pRes;
+ }
+ }
+ }
+ } else {
+ if ((++(st0->index)) >= st0->pBlock->info.rows) {
+ fetchNextBlockIfCompleted(pOperator, newgroup);
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pJoinInfo->pRes;
+ }
+ }
+ }
+ }
+ }
+
+ if (prefixEqual) {
+ int32_t offset = 0;
+ bool completed = false;
+ for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
+ SJoinStatus* st1 = &pJoinInfo->status[i];
+ int32_t rows = pJoinInfo->pRes->info.rows;
+
+ for (int32_t j = 0; j < st1->pBlock->info.numOfCols; ++j) {
+ SColumnInfoData* pCol1 = taosArrayGet(pJoinInfo->pRes->pDataBlock, j + offset);
+ SColumnInfoData* pSrc = taosArrayGet(st1->pBlock->pDataBlock, j);
+
+ int32_t bytes = pSrc->info.bytes;
+ memcpy(pCol1->pData + rows * bytes, pSrc->pData + st1->index * bytes, bytes);
+ }
+
+ offset += st1->pBlock->info.numOfCols;
+ if ((++(st1->index)) == st1->pBlock->info.rows) {
+ completed = true;
+ }
+ }
+
+ if ((++pJoinInfo->pRes->info.rows) >= pJoinInfo->resultInfo.capacity) {
+ pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
+ return pJoinInfo->pRes;
+ }
+
+ if (completed == true) {
+ break;
+ }
+ }
}
+/*
+ while (st0->index < st0->pBlock->info.rows && st1->index < st1->pBlock->info.rows) {
+ SColumnInfoData* p0 = taosArrayGet(st0->pBlock->pDataBlock, 0);
+ SColumnInfoData* p1 = taosArrayGet(st1->pBlock->pDataBlock, 0);
+
+ int64_t* ts0 = (int64_t*)p0->pData;
+ int64_t* ts1 = (int64_t*)p1->pData;
+ if (ts0[st0->index] == ts1[st1->index]) { // add to the final result buffer
+ // check if current output buffer is over the threshold to pause current loop
+ int32_t rows = pJoinInfo->pRes->info.rows;
+ for (int32_t j = 0; j < st0->pBlock->info.numOfCols; ++j) {
+ SColumnInfoData* pCol1 = taosArrayGet(pJoinInfo->pRes->pDataBlock, j);
+ SColumnInfoData* pSrc = taosArrayGet(st0->pBlock->pDataBlock, j);
+
+ int32_t bytes = pSrc->info.bytes;
+ memcpy(pCol1->pData + rows * bytes, pSrc->pData + st0->index * bytes, bytes);
+ }
+
+ for (int32_t j = 0; j < st1->pBlock->info.numOfCols; ++j) {
+ SColumnInfoData* pCol1 = taosArrayGet(pJoinInfo->pRes->pDataBlock, j + st0->pBlock->info.numOfCols);
+ SColumnInfoData* pSrc = taosArrayGet(st1->pBlock->pDataBlock, j);
+
+ int32_t bytes = pSrc->info.bytes;
+ memcpy(pCol1->pData + rows * bytes, pSrc->pData + st1->index * bytes, bytes);
+ }
+
+ st0->index++;
+ st1->index++;
+
+ if ((++pJoinInfo->pRes->info.rows) >= pJoinInfo->resultInfo.capacity) {
+ pJoinInfo->resultInfo.total += pJoinInfo->pRes->info.rows;
+ return pJoinInfo->pRes;
+ }
+ } else if (ts0[st0->index] < ts1[st1->index]) {
+ st0->index++;
+ } else {
+ st1->index++;
+ }
+ }*/
}
}
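+
+// The loop above is an N-way merge join on the first (timestamp) column of each upstream
+// block: a joined row is emitted only when every cursor points at an equal timestamp
+// ("prefixEqual"); otherwise the cursor holding the smallest timestamp advances. A
+// minimal two-upstream trace (timestamps only, values are illustrative):
+//
+//   upstream[0]: 1 3 5      upstream[1]: 3 5 7
+//   1 < 3   -> advance upstream[0]
+//   3 == 3  -> emit joined row, advance both
+//   5 == 5  -> emit joined row, advance both
+//   upstream[0] exhausted -> OP_EXEC_DONE, return accumulated rows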
+static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
+ SDummyInputInfo* pInfo = (SDummyInputInfo*) param;
+
+ // tricky: pData points into memory owned by SSqlRes, so detach it before destroying the block
+ for(int32_t i = 0; i < numOfOutput; ++i) {
+ SColumnInfoData* pColInfoData = taosArrayGet(pInfo->block->pDataBlock, i);
+ pColInfoData->pData = NULL;
+ }
+
+ pInfo->block = destroyOutputBuf(pInfo->block);
+ pInfo->pSql = NULL;
+
+ doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols);
+
+ cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
+ tfree(pInfo->pTableQueryInfo);
+}
+
+// todo: this operator serves as an adapter between the operator tree and the SqlRes result; remove it later
+SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols) {
+ assert(numOfCols > 0);
+ STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
+
+ SDummyInputInfo* pInfo = calloc(1, sizeof(SDummyInputInfo));
+
+ pInfo->pSql = pSql;
+ pInfo->pFilterInfo = pFilterInfo;
+ pInfo->numOfFilterCols = numOfFilterCols;
+ pInfo->pTableQueryInfo = createTmpTableQueryInfo(win);
+
+ pInfo->block = calloc(numOfCols, sizeof(SSDataBlock));
+ pInfo->block->info.numOfCols = numOfCols;
+
+ pInfo->block->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SColumnInfoData colData = {{0}};
+ colData.info.bytes = pSchema[i].bytes;
+ colData.info.type = pSchema[i].type;
+ colData.info.colId = pSchema[i].colId;
+
+ taosArrayPush(pInfo->block->pDataBlock, &colData);
+ }
+
+ SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo));
+ pOptr->name = "DummyInputOperator";
+ pOptr->operatorType = OP_DummyInput;
+ pOptr->numOfOutput = numOfCols;
+ pOptr->blockingOptr = false;
+ pOptr->info = pInfo;
+ pOptr->exec = doGetDataBlock;
+ pOptr->cleanup = destroyDummyInputOperator;
+ return pOptr;
+}
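+
+// A minimal usage sketch (assumes the schema was taken from the subquery's table meta,
+// as handleDownstreamOperator does below); the operator pulls rows on demand through
+// taos_fetch_block() inside doGetDataBlock():
+//
+//   SOperatorInfo* src = createDummyInputOperator(pSubObj, pSchema, numOfCols, NULL, 0);
+//   bool newgroup = false;
+//   SSDataBlock* pBlock = src->exec(src, &newgroup);  // NULL once the result set is drained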
+
+static void destroyJoinOperator(void* param, int32_t numOfOutput) {
+ SJoinOperatorInfo* pInfo = (SJoinOperatorInfo*) param;
+ tfree(pInfo->status);
+
+ pInfo->pRes = destroyOutputBuf(pInfo->pRes);
+}
+
+SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput) {
+ SJoinOperatorInfo* pInfo = calloc(1, sizeof(SJoinOperatorInfo));
+
+ pInfo->numOfUpstream = numOfUpstream;
+ pInfo->status = calloc(numOfUpstream, sizeof(SJoinStatus));
+
+ SRspResultInfo* pResInfo = &pInfo->resultInfo;
+ pResInfo->capacity = 4096;
+ pResInfo->threshold = (int32_t) (4096 * 0.8);
+
+ pInfo->pRes = calloc(1, sizeof(SSDataBlock));
+ pInfo->pRes->info.numOfCols = numOfOutput;
+ pInfo->pRes->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData));
+ for(int32_t i = 0; i < numOfOutput; ++i) {
+ SColumnInfoData colData = {{0}};
+ colData.info.bytes = pSchema[i].bytes;
+ colData.info.type = pSchema[i].type;
+ colData.info.colId = pSchema[i].colId;
+ colData.pData = calloc(1, colData.info.bytes * pResInfo->capacity);
+
+ taosArrayPush(pInfo->pRes->pDataBlock, &colData);
+ }
+
+ SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+ pOperator->name = "JoinOperator";
+ pOperator->operatorType = OP_Join;
+ pOperator->numOfOutput = numOfOutput;
+ pOperator->blockingOptr = false;
+ pOperator->info = pInfo;
+ pOperator->exec = doDataBlockJoin;
+ pOperator->cleanup = destroyJoinOperator;
+
+ for(int32_t i = 0; i < numOfUpstream; ++i) {
+ appendUpstream(pOperator, pUpstream[i]);
+ }
+
+ return pOperator;
+}
+
+void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar) {
+ // set the correct result
+ SSDataBlock* p = pQueryInfo->pQInfo->runtimeEnv.outputBuf;
+ pRes->numOfRows = (p != NULL)? p->info.rows: 0;
+
+ if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) {
+ tscCreateResPointerInfo(pRes, pQueryInfo);
+ tscSetResRawPtrRv(pRes, pQueryInfo, p, convertNchar);
+ }
+
+ tscDebug("0x%"PRIx64" retrieve result in pRes, numOfRows:%d", objId, pRes->numOfRows);
+ pRes->row = 0;
+ pRes->completed = (pRes->numOfRows == 0);
+}
+
+static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t* numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo) {
+ SColumnInfo* tableCols = calloc(numOfCol1, sizeof(SColumnInfo));
+ for(int32_t i = 0; i < numOfCol1; ++i) {
+ SColumn* pCol = taosArrayGetP(px->colList, i);
+ if (pCol->info.flist.numOfFilters > 0) {
+ (*numOfFilterCols) += 1;
+ }
+
+ tableCols[i] = pCol->info;
+ }
+
+ if ((*numOfFilterCols) > 0) {
+ doCreateFilterInfo(tableCols, numOfCol1, (*numOfFilterCols), pFilterInfo, 0);
+ }
+
+ tfree(tableCols);
+}
+
+void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
+ SSqlRes* pOutput = &pSql->res;
+
+ // handle the subsequent stages of the query
+ if (px->pQInfo == NULL) {
+ SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList);
+
+ STableMeta* pTableMeta = tscGetMetaInfo(px, 0)->pTableMeta;
+ SSchema* pSchema = tscGetTableSchema(pTableMeta);
+
+ STableGroupInfo tableGroupInfo = {
+ .numOfTables = 1,
+ .pGroupList = taosArrayInit(1, POINTER_BYTES),
+ };
+
+ tableGroupInfo.map = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+
+ STableKeyInfo tableKeyInfo = {.pTable = NULL, .lastKey = INT64_MIN};
+
+ SArray* group = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(group, &tableKeyInfo);
+
+ taosArrayPush(tableGroupInfo.pGroupList, &group);
+
+ // if it is a join query, create join operator here
+ int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;
+
+ int32_t numOfFilterCols = 0;
+ SSingleColumnFilterInfo* pFilterInfo = NULL;
+ createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
+
+ SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols);
+ pOutput->precision = pSqlObjList[0]->res.precision;
+
+ SSchema* schema = NULL;
+ if (px->numOfTables > 1) {
+ SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES);
+ p[0] = pSourceOperator;
+
+ int32_t num = (int32_t) taosArrayGetSize(px->colList);
+ schema = calloc(num, sizeof(SSchema));
+ memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
+
+ int32_t offset = pSourceOperator->numOfOutput;
+
+ for(int32_t i = 1; i < px->numOfTables; ++i) {
+ STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
+
+ SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
+ int32_t n = pTableMeta1->tableInfo.numOfColumns;
+
+ int32_t numOfFilterCols1 = 0;
+ SSingleColumnFilterInfo* pFilterInfo1 = NULL;
+ createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols1, &pFilterInfo1);
+
+ p[i] = createDummyInputOperator(pSqlObjList[i], pSchema1, n, pFilterInfo1, numOfFilterCols1);
+ memcpy(&schema[offset], pSchema1, n * sizeof(SSchema));
+ offset += n;
+ }
+
+ pSourceOperator = createJoinOperatorInfo(p, px->numOfTables, schema, num);
+ tfree(p);
+ } else {
+ size_t num = taosArrayGetSize(px->colList);
+ schema = calloc(num, sizeof(SSchema));
+ memcpy(schema, pSchema, numOfCol1*sizeof(SSchema));
+ }
+
+ // update the exprinfo
+ int32_t numOfOutput = (int32_t)tscNumOfExprs(px);
+ for(int32_t i = 0; i < numOfOutput; ++i) {
+ SExprInfo* pex = taosArrayGetP(px->exprList, i);
+ int32_t colId = pex->base.colInfo.colId;
+ for(int32_t j = 0; j < pSourceOperator->numOfOutput; ++j) {
+ if (colId == schema[j].colId) {
+ pex->base.colInfo.colIndex = j;
+ break;
+ }
+ }
+ }
+
+ tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute the main query while all nest queries are ready", pSql->self, pSql->self);
+ px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN, pSql->self);
+
+ tfree(pColumnInfo);
+ tfree(schema);
+
+ // set the pRuntimeEnv for pSourceOperator
+ pSourceOperator->pRuntimeEnv = &px->pQInfo->runtimeEnv;
+ }
+
+ uint64_t qId = pSql->self;
+ qTableQuery(px->pQInfo, &qId);
+ convertQueryResult(pOutput, px, pSql->self, false);
+}
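+
+// Informal summary of handleDownstreamOperator above:
+//   1. wrap each subquery result set in a dummy input operator (with optional filters);
+//   2. for multi-table queries, stack a join operator over those inputs;
+//   3. build the main-query QInfo via createQInfoFromQueryNode(..., MASTER_SCAN, ...);
+//   4. drive it with qTableQuery() and expose rows through convertQueryResult().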
+
static void tscDestroyResPointerInfo(SSqlRes* pRes) {
if (pRes->buffer != NULL) { // free all buffers containing the multibyte string
for (int i = 0; i < pRes->numOfCols; i++) {
tfree(pRes->buffer[i]);
}
-
+
pRes->numOfCols = 0;
}
-
+
tfree(pRes->pRsp);
tfree(pRes->tsrow);
@@ -431,63 +1338,121 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
}
tfree(pRes->final);
-
+
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
- if (pCmd == NULL || pCmd->numOfClause == 0) {
+ if (pCmd == NULL) {
return;
}
-
- for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
-
+
+ SQueryInfo* pQueryInfo = pCmd->pQueryInfo;
+ while(pQueryInfo != NULL) {
+ SQueryInfo* p = pQueryInfo->sibling;
+
+ size_t numOfUpstream = taosArrayGetSize(pQueryInfo->pUpstream);
+ for(int32_t i = 0; i < numOfUpstream; ++i) {
+ SQueryInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i);
+ freeQueryInfoImpl(pUpQueryInfo);
+
+ clearAllTableMetaInfo(pUpQueryInfo, removeMeta);
+ if (pUpQueryInfo->pQInfo != NULL) {
+ qDestroyQueryInfo(pUpQueryInfo->pQInfo);
+ pUpQueryInfo->pQInfo = NULL;
+ }
+
+ tfree(pUpQueryInfo);
+ }
+
+ if (pQueryInfo->udfCopy) {
+ pQueryInfo->pUdfInfo = taosArrayDestroy(pQueryInfo->pUdfInfo);
+ } else {
+ pQueryInfo->pUdfInfo = tscDestroyUdfArrayList(pQueryInfo->pUdfInfo);
+ }
+
freeQueryInfoImpl(pQueryInfo);
clearAllTableMetaInfo(pQueryInfo, removeMeta);
+
+ if (pQueryInfo->pQInfo != NULL) {
+ qDestroyQueryInfo(pQueryInfo->pQInfo);
+ pQueryInfo->pQInfo = NULL;
+ }
+
tfree(pQueryInfo);
+ pQueryInfo = p;
}
-
- pCmd->numOfClause = 0;
- tfree(pCmd->pQueryInfo);
+
+ pCmd->pQueryInfo = NULL;
+ pCmd->active = NULL;
}
-void destroyTableNameList(SSqlCmd* pCmd) {
- if (pCmd->numOfTables == 0) {
- assert(pCmd->pTableNameList == NULL);
+void destroyTableNameList(SInsertStatementParam* pInsertParam) {
+ if (pInsertParam->numOfTables == 0) {
+ assert(pInsertParam->pTableNameList == NULL);
return;
}
- for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
- tfree(pCmd->pTableNameList[i]);
+ for(int32_t i = 0; i < pInsertParam->numOfTables; ++i) {
+ tfree(pInsertParam->pTableNameList[i]);
}
- pCmd->numOfTables = 0;
- tfree(pCmd->pTableNameList);
+ pInsertParam->numOfTables = 0;
+ tfree(pInsertParam->pTableNameList);
}
-void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
+void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
- pCmd->curSql = NULL;
pCmd->msgType = 0;
- pCmd->parseFinished = 0;
- pCmd->autoCreated = 0;
- destroyTableNameList(pCmd);
+ pCmd->insertParam.sql = NULL;
+ destroyTableNameList(&pCmd->insertParam);
+
+ pCmd->insertParam.pTableBlockHashList = tscDestroyBlockHashTable(pCmd->insertParam.pTableBlockHashList, clearCachedMeta);
+ pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pCmd->insertParam.pDataBlocks);
+ tfree(pCmd->insertParam.tagData.data);
+ pCmd->insertParam.tagData.dataLen = 0;
+
+ tscFreeQueryInfo(pCmd, clearCachedMeta);
+
+ if (pCmd->pTableMetaMap != NULL) {
+ STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
+ while (p) {
+ taosArrayDestroy(p->vgroupIdList);
+ tfree(p->pTableMeta);
+ p = taosHashIterate(pCmd->pTableMetaMap, p);
+ }
+
+ taosHashCleanup(pCmd->pTableMetaMap);
+ pCmd->pTableMetaMap = NULL;
+ }
+}
+
+void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
+ if (pTableMetaMap == NULL) {
+ return NULL;
+ }
+
+ STableMetaVgroupInfo* p = taosHashIterate(pTableMetaMap, NULL);
+ while (p) {
+ taosArrayDestroy(p->vgroupIdList);
+ tfree(p->pTableMeta);
+ p = taosHashIterate(pTableMetaMap, p);
+ }
- pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
- pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
- tscFreeQueryInfo(pCmd, removeMeta);
+ taosHashCleanup(pTableMetaMap);
+ return NULL;
}
void tscFreeSqlResult(SSqlObj* pSql) {
- tscDestroyLocalMerger(pSql);
-
SSqlRes* pRes = &pSql->res;
+
+ tscDestroyGlobalMerger(pRes->pMerger);
+ pRes->pMerger = NULL;
+
tscDestroyResPointerInfo(pRes);
-
memset(&pSql->res, 0, sizeof(SSqlRes));
}
@@ -499,7 +1464,7 @@ void tscFreeSubobj(SSqlObj* pSql) {
tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
- tscDebug("0x%"PRIx64" free sub SqlObj:%p, index:%d", pSql->self, pSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
@@ -509,7 +1474,6 @@ void tscFreeSubobj(SSqlObj* pSql) {
}
tfree(pSql->subState.states);
-
pSql->subState.numOfSub = 0;
}
@@ -533,7 +1497,6 @@ void tscFreeRegisteredSqlObj(void *pSql) {
tscDebug("0x%"PRIx64" free SqlObj, total in tscObj:%d, total:%d", p->self, num, total);
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
-
}
void tscFreeMetaSqlObj(int64_t *rid){
@@ -553,6 +1516,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
return;
}
+ int64_t sid = pSql->self;
+
tscDebug("0x%"PRIx64" start to free sqlObj", pSql->self);
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
@@ -564,7 +1529,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
- if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
+ if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_GLOBALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscRemoveFromSqlList(pSql);
}
@@ -572,6 +1537,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->signature = NULL;
pSql->fp = NULL;
tfree(pSql->sqlstr);
+ tfree(pSql->pBuf);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
@@ -580,13 +1546,11 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeSqlResult(pSql);
tscResetSqlCmd(pCmd, false);
- tfree(pCmd->tagData.data);
- pCmd->tagData.dataLen = 0;
-
- memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
-
+
+ tscDebug("0x%"PRIx64" addr:%p free completed", sid, pSql);
+
tsem_destroy(&pSql->rspSem);
memset(pSql, 0, sizeof(*pSql));
free(pSql);
@@ -595,6 +1559,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
void tscDestroyBoundColumnInfo(SParsedDataColInfo* pColInfo) {
tfree(pColInfo->boundedColumns);
tfree(pColInfo->cols);
+ tfree(pColInfo->colIdxInfo);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
@@ -603,21 +1568,25 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
}
tfree(pDataBlock->pData);
- tfree(pDataBlock->params);
-
- // free the refcount for metermeta
- if (pDataBlock->pTableMeta != NULL) {
- tfree(pDataBlock->pTableMeta);
- }
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pDataBlock->tableName, name);
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ }
+
+ if (!pDataBlock->cloned) {
+ tfree(pDataBlock->params);
+
+ // free the refcount for metermeta
+ if (pDataBlock->pTableMeta != NULL) {
+ tfree(pDataBlock->pTableMeta);
+ }
+
+ tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
}
- tscDestroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
tfree(pDataBlock);
}
@@ -660,6 +1629,47 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
return NULL;
}
+
+void freeUdfInfo(SUdfInfo* pUdfInfo) {
+ if (pUdfInfo == NULL) {
+ return;
+ }
+
+ if (pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY]) {
+ (*(udfDestroyFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY])(&pUdfInfo->init);
+ }
+
+ tfree(pUdfInfo->name);
+
+ if (pUdfInfo->path) {
+ unlink(pUdfInfo->path);
+ }
+
+ tfree(pUdfInfo->path);
+
+ tfree(pUdfInfo->content);
+
+ taosCloseDll(pUdfInfo->handle);
+}
+
+// todo refactor
+void* tscDestroyUdfArrayList(SArray* pUdfList) {
+ if (pUdfList == NULL) {
+ return NULL;
+ }
+
+ size_t size = taosArrayGetSize(pUdfList);
+ for (int32_t i = 0; i < size; i++) {
+ SUdfInfo* udf = taosArrayGet(pUdfList, i);
+ freeUdfInfo(udf);
+ }
+
+ taosArrayDestroy(pUdfList);
+ return NULL;
+}
+
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
@@ -677,14 +1687,11 @@ void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
SSqlCmd* pCmd = &pSql->cmd;
- assert(pDataBlock->pTableMeta != NULL);
-
- pCmd->numOfTablesInSubmit = pDataBlock->numOfTables;
+ assert(pDataBlock->pTableMeta != NULL && pDataBlock->size <= pDataBlock->nAllocSize && pDataBlock->size > sizeof(SMsgDesc));
- assert(pCmd->numOfClause == 1);
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
+ STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
- // todo refactor
+ // todo remove it later
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tNameAssign(&pTableMetaInfo->name, &pDataBlock->tableName);
@@ -694,32 +1701,46 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
}
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
- pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
+ pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pDataBlock->pTableMeta);
+ pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
}
/*
- * the submit message consists of : [RPC header|message body|digest]
- * the dataBlock only includes the RPC Header buffer and actual submit message body, space for digest needs
- * additional space.
+ * the format of submit message is as follows [RPC header|message body|digest]
+ * the dataBlock only includes the RPC Header buffer and actual submit message body,
+ * space for digest needs additional space.
*/
- int ret = tscAllocPayload(pCmd, pDataBlock->size + 100);
+ int ret = tscAllocPayload(pCmd, pDataBlock->size);
if (TSDB_CODE_SUCCESS != ret) {
return ret;
}
- assert(pDataBlock->size <= pDataBlock->nAllocSize);
memcpy(pCmd->payload, pDataBlock->pData, pDataBlock->size);
- /*
- * the payloadLen should be actual message body size
- * the old value of payloadLen is the allocated payload size
- */
+ // the payloadLen should be the actual message body size; its old value was the allocated payload size
pCmd->payloadLen = pDataBlock->size;
+ assert(pCmd->allocSize >= (uint32_t)(pCmd->payloadLen));
- assert(pCmd->allocSize >= (uint32_t)(pCmd->payloadLen + 100) && pCmd->payloadLen > 0);
+ // NOTE: the shell message size does not include SMsgDesc
+ int32_t size = pCmd->payloadLen - sizeof(SMsgDesc);
+
+ SMsgDesc* pMsgDesc = (SMsgDesc*) pCmd->payload;
+ pMsgDesc->numOfVnodes = htonl(1); // always for one vnode
+
+ SSubmitMsg *pShellMsg = (SSubmitMsg *)(pCmd->payload + sizeof(SMsgDesc));
+ pShellMsg->header.vgId = htonl(pDataBlock->pTableMeta->vgId); // all data in the current block routes to the same vgroup
+ pShellMsg->header.contLen = htonl(size); // the length does not include the size of SMsgDesc
+ pShellMsg->length = pShellMsg->header.contLen;
+ pShellMsg->numOfBlocks = htonl(pDataBlock->numOfTables); // the number of tables to be inserted
+
+ tscDebug("0x%"PRIx64" submit msg built, vgId:%d numOfTables:%d", pSql->self, pDataBlock->pTableMeta->vgId, pDataBlock->numOfTables);
return TSDB_CODE_SUCCESS;
}
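// Payload layout produced above (field widths illustrative):
//
//   pCmd->payload
//   +----------+--------------------+---------------------------------+
//   | SMsgDesc | SSubmitMsg header  | SSubmitBlk + rows, per table ...|
//   +----------+--------------------+---------------------------------+
//
// numOfVnodes is always htonl(1) here, and both header.contLen and length
// carry htonl(pCmd->payloadLen - sizeof(SMsgDesc)), i.e. the body size
// excluding the vnode descriptor.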
+SQueryInfo* tscGetQueryInfo(SSqlCmd* pCmd) {
+ return pCmd->active;
+}
+
/**
* create the in-memory buffer for each table to keep the submitted data block
* @param initialSize
@@ -744,13 +1765,15 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
if (dataBuf->nAllocSize <= dataBuf->headerSize) {
dataBuf->nAllocSize = dataBuf->headerSize * 2;
}
-
- dataBuf->pData = calloc(1, dataBuf->nAllocSize);
+
+ //dataBuf->pData = calloc(1, dataBuf->nAllocSize);
+ dataBuf->pData = malloc(dataBuf->nAllocSize);
if (dataBuf->pData == NULL) {
tscError("failed to allocated memory, reason:%s", strerror(errno));
tfree(dataBuf);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+ memset(dataBuf->pData, 0, sizeof(SSubmitBlk));
  // Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
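// Note on the malloc() + memset() pair above (a sketch of the assumed
// invariant): only the leading SSubmitBlk header must start zeroed, since
// every byte past it is written by the row serializer before it is read.
// Zeroing sizeof(SSubmitBlk) bytes therefore replaces the former
// full-buffer calloc() and avoids clearing the whole nAllocSize-byte
// insert buffer for every table.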
@@ -798,11 +1821,13 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
return TSDB_CODE_SUCCESS;
}
-static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bool includeSchema) {
+// Erase the empty space reserved for binary data
+static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
+ SBlockKeyTuple* blkKeyTuple) {
  // TODO: optimize this function; handle the case where no binary column is present
- STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
- STableComInfo tinfo = tscGetTableInfo(pTableMeta);
- SSchema* pSchema = tscGetTableSchema(pTableMeta);
+ STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
+ STableComInfo tinfo = tscGetTableInfo(pTableMeta);
+ SSchema* pSchema = tscGetTableSchema(pTableMeta);
SSubmitBlk* pBlock = pDataBlock;
memcpy(pDataBlock, pTableDataBlock->pData, sizeof(SSubmitBlk));
@@ -811,7 +1836,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t flen = 0; // original total length of row
// schema needs to be included into the submit data block
- if (includeSchema) {
+ if (insertParam->schemaAttached) {
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
@@ -827,31 +1852,51 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t schemaSize = sizeof(STColumn) * numOfCols;
pBlock->schemaLen = schemaSize;
} else {
- for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
- flen += TYPE_BYTES[pSchema[j].type];
+ if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
+ for (int32_t j = 0; j < tinfo.numOfColumns; ++j) {
+ flen += TYPE_BYTES[pSchema[j].type];
+ }
}
-
pBlock->schemaLen = 0;
}
char* p = pTableDataBlock->pData + sizeof(SSubmitBlk);
pBlock->dataLen = 0;
int32_t numOfRows = htons(pBlock->numOfRows);
-
- for (int32_t i = 0; i < numOfRows; ++i) {
- SDataRow trow = (SDataRow) pDataBlock;
- dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen));
- dataRowSetVersion(trow, pTableMeta->sversion);
- int toffset = 0;
- for (int32_t j = 0; j < tinfo.numOfColumns; j++) {
- tdAppendColVal(trow, p, pSchema[j].type, pSchema[j].bytes, toffset);
- toffset += TYPE_BYTES[pSchema[j].type];
- p += pSchema[j].bytes;
- }
+ if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
+ for (int32_t i = 0; i < numOfRows; ++i) {
+ SMemRow memRow = (SMemRow)pDataBlock;
+ memRowSetType(memRow, SMEM_ROW_DATA);
+ SDataRow trow = memRowDataBody(memRow);
+ dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen));
+ dataRowSetVersion(trow, pTableMeta->sversion);
+
+ int toffset = 0;
+ for (int32_t j = 0; j < tinfo.numOfColumns; j++) {
+ tdAppendColVal(trow, p, pSchema[j].type, toffset);
+ toffset += TYPE_BYTES[pSchema[j].type];
+ p += pSchema[j].bytes;
+ }
- pDataBlock = (char*)pDataBlock + dataRowLen(trow);
- pBlock->dataLen += dataRowLen(trow);
+ pDataBlock = (char*)pDataBlock + memRowTLen(memRow);
+ pBlock->dataLen += memRowTLen(memRow);
+ }
+ } else {
+ for (int32_t i = 0; i < numOfRows; ++i) {
+ char* payload = (blkKeyTuple + i)->payloadAddr;
+ if (isNeedConvertRow(payload)) {
+ convertSMemRow(pDataBlock, payload, pTableDataBlock);
+ TDRowTLenT rowTLen = memRowTLen(pDataBlock);
+ pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
+ pBlock->dataLen += rowTLen;
+ } else {
+ TDRowTLenT rowTLen = memRowTLen(payload);
+ memcpy(pDataBlock, payload, rowTLen);
+ pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen);
+ pBlock->dataLen += rowTLen;
+ }
+ }
}
int32_t len = pBlock->dataLen + pBlock->schemaLen;
@@ -862,10 +1907,10 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
}
static int32_t getRowExpandSize(STableMeta* pTableMeta) {
- int32_t result = TD_DATA_ROW_HEAD_SIZE;
- int32_t columns = tscGetNumOfColumns(pTableMeta);
+ int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE;
+ int32_t columns = tscGetNumOfColumns(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
- for(int32_t i = 0; i < columns; i++) {
+ for (int32_t i = 0; i < columns; i++) {
if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
}
@@ -873,102 +1918,129 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result;
}
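// Worked example (illustrative schema): for
//   ts TIMESTAMP, val INT, s BINARY(20), n NCHAR(10)
// only the two variable-length columns contribute, so
//   expandSize = TD_MEM_ROW_DATA_HEAD_SIZE + 2 * TYPE_BYTES[TSDB_DATA_TYPE_BINARY]
// i.e. the per-row memory-row header plus one offset slot per var-type column.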
-static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
- pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList);
- if (pCmd->pTableNameList == NULL) {
- pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
- } else {
- memset(pCmd->pTableNameList, 0, pCmd->numOfTables * POINTER_BYTES);
+static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
+ pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList);
+ if (pInsertParam->pTableNameList == NULL) {
+ pInsertParam->pTableNameList = malloc(pInsertParam->numOfTables * POINTER_BYTES);
}
- STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+ STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
- tfree(pCmd->pTableNameList[i]);
+ //tfree(pInsertParam->pTableNameList[i]);
- pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
- p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
+ pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
+ p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1);
}
if (freeBlockMap) {
- pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, false);
+ pInsertParam->pTableBlockHashList = tscDestroyBlockHashTable(pInsertParam->pTableBlockHashList, false);
}
}
-int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
- const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
- SSqlCmd* pCmd = &pSql->cmd;
-
- void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
- SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
+int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
+ const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
+ int code = 0;
+ bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType);
+ void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
+ SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
- STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL);
+ STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL);
STableDataBlocks* pOneTableBlock = *p;
- while(pOneTableBlock) {
- // the maximum expanded size in byte when a row-wise data is converted to SDataRow format
- int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
- STableDataBlocks* dataBuf = NULL;
-
- int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
- INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
- if (ret != TSDB_CODE_SUCCESS) {
- tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
- taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
- return ret;
- }
- SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
- int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+ SBlockKeyInfo blkKeyInfo = {0}; // shared across pOneTableBlock iterations
- if (dataBuf->nAllocSize < destSize) {
- while (dataBuf->nAllocSize < destSize) {
- dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5);
+ while(pOneTableBlock) {
+ SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
+ if (pBlocks->numOfRows > 0) {
+ // the maximum expanded size in bytes when row-wise data is converted to SDataRow format
+ int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
+ STableDataBlocks* dataBuf = NULL;
+
+ int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
+ INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret);
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(blkKeyInfo.pKeyTuple);
+ return ret;
}
- char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
- if (tmp != NULL) {
- dataBuf->pData = tmp;
- memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
- } else { // failed to allocate memory, free already allocated memory and return error code
- tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
+ int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +
+ sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
- taosHashCleanup(pVnodeDataBlockHashList);
- tscDestroyBlockArrayList(pVnodeDataBlockList);
- tfree(dataBuf->pData);
+ if (dataBuf->nAllocSize < destSize) {
+ dataBuf->nAllocSize = (uint32_t)(destSize * 1.5);
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize);
+ if (tmp != NULL) {
+ dataBuf->pData = tmp;
+ //memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
+ } else { // failed to allocate memory, free already allocated memory and return error code
+ tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize);
+
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(dataBuf->pData);
+ tfree(blkKeyInfo.pKeyTuple);
+
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
}
- }
- tscSortRemoveDataBlockDupRows(pOneTableBlock);
- char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
+ if (isRawPayload) {
+ tscSortRemoveDataBlockDupRowsRaw(pOneTableBlock);
+ char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize * (pBlocks->numOfRows - 1);
+
+ tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
+ pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
+ pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
+ } else {
+ if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) {
+ taosHashCleanup(pVnodeDataBlockHashList);
+ tscDestroyBlockArrayList(pVnodeDataBlockList);
+ tfree(dataBuf->pData);
+ tfree(blkKeyInfo.pKeyTuple);
+ return code;
+ }
+ ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0);
+
+ SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1;
+ tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
+ pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
+ pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
+ }
- tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql->self, tNameGetTableName(&pOneTableBlock->tableName),
- pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
+ int32_t len = pBlocks->numOfRows *
+ (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) +
+ sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
- int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
+ pBlocks->tid = htonl(pBlocks->tid);
+ pBlocks->uid = htobe64(pBlocks->uid);
+ pBlocks->sversion = htonl(pBlocks->sversion);
+ pBlocks->numOfRows = htons(pBlocks->numOfRows);
+ pBlocks->schemaLen = 0;
- pBlocks->tid = htonl(pBlocks->tid);
- pBlocks->uid = htobe64(pBlocks->uid);
- pBlocks->sversion = htonl(pBlocks->sversion);
- pBlocks->numOfRows = htons(pBlocks->numOfRows);
- pBlocks->schemaLen = 0;
+ // erase the empty space reserved for binary data
+ int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam, blkKeyInfo.pKeyTuple);
+ assert(finalLen <= len);
- // erase the empty space reserved for binary data
- int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pCmd->submitSchema);
- assert(finalLen <= len);
+ dataBuf->size += (finalLen + sizeof(SSubmitBlk));
+ assert(dataBuf->size <= dataBuf->nAllocSize);
- dataBuf->size += (finalLen + sizeof(SSubmitBlk));
- assert(dataBuf->size <= dataBuf->nAllocSize);
+ // the length does not include the SSubmitBlk structure
+ pBlocks->dataLen = htonl(finalLen);
+ dataBuf->numOfTables += 1;
- // the length does not include the SSubmitBlk structure
- pBlocks->dataLen = htonl(finalLen);
- dataBuf->numOfTables += 1;
+ pBlocks->numOfRows = 0;
+ } else {
+ tscDebug("0x%"PRIx64" table %s data block is empty", pInsertParam->objectId, pOneTableBlock->tableName.tname);
+ }
- p = taosHashIterate(pCmd->pTableBlockHashList, p);
+ p = taosHashIterate(pInsertParam->pTableBlockHashList, p);
if (p == NULL) {
break;
}
@@ -976,11 +2048,12 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
pOneTableBlock = *p;
}
- extractTableNameList(pCmd, freeBlockMap);
+ extractTableNameList(pInsertParam, freeBlockMap);
// free the table data blocks;
- pCmd->pDataBlocks = pVnodeDataBlockList;
+ pInsertParam->pDataBlocks = pVnodeDataBlockList;
taosHashCleanup(pVnodeDataBlockHashList);
+ tfree(blkKeyInfo.pKeyTuple);
return TSDB_CODE_SUCCESS;
}
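// Buffer-growth sketch for the merge loop above (illustrative numbers):
// with dataBuf->size = 60 KB and an incoming table block that brings
// destSize to 112 KB against nAllocSize = 100 KB, the buffer is grown once
// to (uint32_t)(destSize * 1.5) = 168 KB by a single realloc(). This
// replaces the old "multiply by 1.5 until large enough" loop, and the
// memset of the reallocated tail is dropped because trimDataBlock()
// rewrites that region anyway.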
@@ -1011,26 +2084,30 @@ bool tscIsInsertData(char* sqlstr) {
}
int tscAllocPayload(SSqlCmd* pCmd, int size) {
- assert(size > 0);
-
if (pCmd->payload == NULL) {
assert(pCmd->allocSize == 0);
pCmd->payload = (char*)calloc(1, size);
- if (pCmd->payload == NULL) return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ if (pCmd->payload == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
pCmd->allocSize = size;
} else {
if (pCmd->allocSize < (uint32_t)size) {
char* b = realloc(pCmd->payload, size);
- if (b == NULL) return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ if (b == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
pCmd->payload = b;
pCmd->allocSize = size;
}
-
+
memset(pCmd->payload, 0, pCmd->allocSize);
}
- assert(pCmd->allocSize >= (uint32_t)size);
+ assert(pCmd->allocSize >= (uint32_t)size && size > 0);
return TSDB_CODE_SUCCESS;
}
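// A minimal usage sketch (hypothetical caller, zero-initialized SSqlCmd):
//
//   SSqlCmd cmd = {0};
//   if (tscAllocPayload(&cmd, 1024) == TSDB_CODE_SUCCESS) {
//     // cmd.payload now points at >= 1024 zeroed bytes; a later call with
//     // a larger size grows the buffer via realloc(), while a smaller size
//     // simply re-zeroes and reuses the existing allocation.
//   }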
@@ -1040,16 +2117,27 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
return f;
}
+int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo) {
+ if (pQueryInfo->fieldsInfo.numOfOutput <= 0 || pQueryInfo->fieldsInfo.internalField == NULL) {
+ return 0;
+ }
+
+ for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
+ SInternalField* pField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
+ if (!pField->visible) {
+ return i;
+ }
+ }
+
+ return pQueryInfo->fieldsInfo.numOfOutput;
+}
+
+
SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
assert(pFieldInfo != NULL);
pFieldInfo->numOfOutput++;
-
- struct SInternalField info = {
- .pSqlExpr = NULL,
- .pArithExprInfo = NULL,
- .visible = true,
- .pFieldFilters = NULL,
- };
+
+ struct SInternalField info = { .pExpr = NULL, .visible = true };
info.field = *pField;
return taosArrayPush(pFieldInfo->internalField, &info);
@@ -1057,28 +2145,21 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field) {
pFieldInfo->numOfOutput++;
- struct SInternalField info = {
- .pSqlExpr = NULL,
- .pArithExprInfo = NULL,
- .visible = true,
- .pFieldFilters = NULL,
- };
+ struct SInternalField info = { .pExpr = NULL, .visible = true };
info.field = *field;
return taosArrayInsert(pFieldInfo->internalField, index, &info);
}
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
-
- SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
- pExpr->offset = 0;
+ int32_t offset = 0;
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
- for (int32_t i = 1; i < numOfExprs; ++i) {
- SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1);
- SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i);
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* p = taosArrayGetP(pQueryInfo->exprList, i);
- p->offset = prev->offset + prev->resBytes;
+ p->base.offset = offset;
+ offset += p->base.resBytes;
}
}
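// Offset example (illustrative): three output expressions with resBytes of
// 8, 4 and 16 receive base.offset values 0, 8 and 12: each offset is the
// running sum of the preceding result widths, which the rewritten single
// pass computes without special-casing exprList[0].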
@@ -1094,9 +2175,9 @@ TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index) {
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index) {
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, index);
- assert(pInfo != NULL && pInfo->pSqlExpr != NULL);
+ assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL);
- return pInfo->pSqlExpr->offset;
+ return pInfo->pExpr->base.offset;
}
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) {
@@ -1119,6 +2200,7 @@ int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFi
*diffSize = 1;
if (pField2->bytes > pField1->bytes) {
+ assert(IS_VAR_DATA_TYPE(pField1->type));
pField1->bytes = pField2->bytes;
}
}
@@ -1146,144 +2228,205 @@ int32_t tscGetResRowLength(SArray* pExprList) {
if (num == 0) {
return 0;
}
-
+
int32_t size = 0;
for(int32_t i = 0; i < num; ++i) {
- SSqlExpr* pExpr = taosArrayGetP(pExprList, i);
- size += pExpr->resBytes;
+ SExprInfo* pExpr = taosArrayGetP(pExprList, i);
+ size += pExpr->base.resBytes;
}
-
+
return size;
}
-static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
- for(int32_t i = 0; i < numOfFilters; ++i) {
- if (pFilterInfo[i].filterstr) {
- tfree(pFilterInfo[i].pz);
+static void destroyFilterInfo(SColumnFilterList* pFilterList) {
+ for(int32_t i = 0; i < pFilterList->numOfFilters; ++i) {
+ if (pFilterList->filterInfo[i].filterstr) {
+ tfree(pFilterList->filterInfo[i].pz);
}
}
-
- tfree(pFilterInfo);
-}
-static void tscColumnDestroy(SColumn* pCol) {
- destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters);
- free(pCol);
+ tfree(pFilterList->filterInfo);
+ pFilterList->numOfFilters = 0;
}
+void* sqlExprDestroy(SExprInfo* pExpr) {
+ if (pExpr == NULL) {
+ return NULL;
+ }
+
+ SSqlExpr* p = &pExpr->base;
+ for(int32_t i = 0; i < tListLen(p->param); ++i) {
+ tVariantDestroy(&p->param[i]);
+ }
+
+ if (p->flist.numOfFilters > 0) {
+ tfree(p->flist.filterInfo);
+ }
+
+ if (pExpr->pExpr != NULL) {
+ tExprTreeDestroy(pExpr->pExpr, NULL);
+ }
+
+ tfree(pExpr);
+ return NULL;
+}
void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pFieldInfo == NULL) {
return;
}
- for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) {
- SInternalField* pInfo = taosArrayGet(pFieldInfo->internalField, i);
-
- if (pInfo->pArithExprInfo != NULL) {
- tExprTreeDestroy(pInfo->pArithExprInfo->pExpr, NULL);
+ if (pFieldInfo->internalField != NULL) {
+ size_t num = taosArrayGetSize(pFieldInfo->internalField);
+ for (int32_t i = 0; i < num; ++i) {
+ SInternalField* pfield = taosArrayGet(pFieldInfo->internalField, i);
+ if (pfield->pExpr != NULL && pfield->pExpr->pExpr != NULL) {
+ sqlExprDestroy(pfield->pExpr);
+ }
+ }
+ }
+
+ taosArrayDestroy(pFieldInfo->internalField);
+ tfree(pFieldInfo->final);
+
+ memset(pFieldInfo, 0, sizeof(SFieldInfo));
+}
- SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
- for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
- if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) {
- tfree(pFuncMsg->arg[j].argValue.pz);
+void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArray* pExprList) {
+ assert(pFieldInfo != NULL && pSrc != NULL && pExprList != NULL);
+ pFieldInfo->numOfOutput = pSrc->numOfOutput;
+
+ if (pSrc->final != NULL) {
+ pFieldInfo->final = calloc(pSrc->numOfOutput, sizeof(TAOS_FIELD));
+ memcpy(pFieldInfo->final, pSrc->final, sizeof(TAOS_FIELD) * pSrc->numOfOutput);
+ }
+
+ if (pSrc->internalField != NULL) {
+ size_t num = taosArrayGetSize(pSrc->internalField);
+ size_t numOfExpr = taosArrayGetSize(pExprList);
+
+ for (int32_t i = 0; i < num; ++i) {
+ SInternalField* pfield = taosArrayGet(pSrc->internalField, i);
+
+ SInternalField p = {.visible = pfield->visible, .field = pfield->field};
+
+ bool found = false;
+ int32_t resColId = pfield->pExpr->base.resColId;
+ for(int32_t j = 0; j < numOfExpr; ++j) {
+ SExprInfo* pExpr = taosArrayGetP(pExprList, j);
+ if (pExpr->base.resColId == resColId) {
+ p.pExpr = pExpr;
+ found = true;
+ break;
}
}
- tfree(pInfo->pArithExprInfo);
- }
+ if (!found) {
+ assert(pfield->pExpr->pExpr != NULL);
+ p.pExpr = calloc(1, sizeof(SExprInfo));
+ tscExprAssign(p.pExpr, pfield->pExpr);
+ }
- if (pInfo->pFieldFilters != NULL) {
- tscColumnDestroy(pInfo->pFieldFilters->pFilters);
- tfree(pInfo->pFieldFilters);
+ taosArrayPush(pFieldInfo->internalField, &p);
}
}
-
- taosArrayDestroy(pFieldInfo->internalField);
-
- memset(pFieldInfo, 0, sizeof(SFieldInfo));
}
-static SSqlExpr* doCreateSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
- int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
-
- SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr));
+
+SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+ int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
+ SExprInfo* pExpr = calloc(1, sizeof(SExprInfo));
if (pExpr == NULL) {
return NULL;
}
- pExpr->functionId = functionId;
+ SSqlExpr* p = &pExpr->base;
+ p->functionId = functionId;
// set the correct columnIndex index
if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
- } else if (pColIndex->columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
- pExpr->colInfo.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
+ SSchema* s = tGetTbnameColumnSchema();
+ p->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
+ p->colBytes = s->bytes;
+ p->colType = s->type;
} else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) {
- pExpr->colInfo.colId = pColIndex->columnIndex;
+ p->colInfo.colId = pColIndex->columnIndex;
+ p->colBytes = size;
+ p->colType = type;
+ } else if (functionId == TSDB_FUNC_BLKINFO) {
+ p->colInfo.colId = pColIndex->columnIndex;
+ p->colBytes = TSDB_MAX_BINARY_LEN;
+ p->colType = TSDB_DATA_TYPE_BINARY;
} else {
+ int32_t len = tListLen(p->colInfo.name);
if (TSDB_COL_IS_TAG(colType)) {
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
- pExpr->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
- tstrncpy(pExpr->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(pExpr->colInfo.name));
+ p->colInfo.colId = pSchema[pColIndex->columnIndex].colId;
+ p->colBytes = pSchema[pColIndex->columnIndex].bytes;
+ p->colType = pSchema[pColIndex->columnIndex].type;
+ snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema[pColIndex->columnIndex].name);
} else if (pTableMetaInfo->pTableMeta != NULL) {
// in handling select database/version/server_status(), the pTableMeta is NULL
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex);
- pExpr->colInfo.colId = pSchema->colId;
- tstrncpy(pExpr->colInfo.name, pSchema->name, sizeof(pExpr->colInfo.name));
+ p->colInfo.colId = pSchema->colId;
+ p->colBytes = pSchema->bytes;
+ p->colType = pSchema->type;
+ snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema->name);
}
}
-
- pExpr->colInfo.flag = colType;
- pExpr->colInfo.colIndex = pColIndex->columnIndex;
- pExpr->resType = type;
- pExpr->resBytes = size;
- pExpr->resColId = resColId;
- pExpr->interBytes = interSize;
+ p->colInfo.flag = colType;
+ p->colInfo.colIndex = pColIndex->columnIndex;
+
+ p->resType = type;
+ p->resBytes = size;
+ p->resColId = resColId;
+ p->interBytes = interSize;
if (pTableMetaInfo->pTableMeta) {
- pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
+ p->uid = pTableMetaInfo->pTableMeta->id.uid;
}
-
+
return pExpr;
}
-SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList);
if (index == num) {
- return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
+ return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}
-
- SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
+
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
+ SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
return pExpr;
}
-SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
+SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
- SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
+ STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
+ SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayPush(pQueryInfo->exprList, &pExpr);
return pExpr;
}
-SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex,
+SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex,
int16_t type, int16_t size) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, index);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, index);
if (pExpr == NULL) {
return NULL;
}
- pExpr->functionId = functionId;
+ SSqlExpr* pse = &pExpr->base;
+ pse->functionId = functionId;
- pExpr->colInfo.colIndex = srcColumnIndex;
- pExpr->colInfo.colId = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, srcColumnIndex)->colId;
+ pse->colInfo.colIndex = srcColumnIndex;
+ pse->colInfo.colId = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, srcColumnIndex)->colId;
- pExpr->resType = type;
- pExpr->resBytes = size;
+ pse->resType = type;
+ pse->resBytes = size;
return pExpr;
}
@@ -1293,10 +2436,10 @@ bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) {
return false;
}
- int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
+ int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_STDDEV_DST) {
return true;
}
}
@@ -1304,11 +2447,12 @@ bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) {
return false;
}
-size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
+size_t tscNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}
-void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
+// todo REFACTOR
+void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0);
// set parameter value
@@ -1319,60 +2463,36 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes)
assert(pExpr->numOfParams <= 3);
}
-SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index) {
+SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) {
return taosArrayGetP(pQueryInfo->exprList, index);
}
-void* sqlExprDestroy(SSqlExpr* pExpr) {
- if (pExpr == NULL) {
- return NULL;
- }
-
- for(int32_t i = 0; i < tListLen(pExpr->param); ++i) {
- tVariantDestroy(&pExpr->param[i]);
- }
-
- tfree(pExpr);
-
- return NULL;
-}
-
/*
- * NOTE: Does not release SSqlExprInfo here.
+ * NOTE: Does not release SExprInfo here.
*/
-void tscSqlExprInfoDestroy(SArray* pExprInfo) {
+void tscExprDestroy(SArray* pExprInfo) {
size_t size = taosArrayGetSize(pExprInfo);
-
+
for(int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = taosArrayGetP(pExprInfo, i);
+ SExprInfo* pExpr = taosArrayGetP(pExprInfo, i);
sqlExprDestroy(pExpr);
}
-
+
taosArrayDestroy(pExprInfo);
}
-int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
+int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
assert(src != NULL && dst != NULL);
-
+
size_t size = taosArrayGetSize(src);
for (int32_t i = 0; i < size; ++i) {
- SSqlExpr* pExpr = taosArrayGetP(src, i);
-
- if (pExpr->uid == uid) {
-
- if (deepcopy) {
- SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr));
- if (p1 == NULL) {
- return -1;
- }
+ SExprInfo* pExpr = taosArrayGetP(src, i);
- *p1 = *pExpr;
- memset(p1->param, 0, sizeof(tVariant) * tListLen(p1->param));
+ if (pExpr->base.uid == uid) {
+ if (deepcopy) {
+ SExprInfo* p1 = calloc(1, sizeof(SExprInfo));
+ tscExprAssign(p1, pExpr);
- for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
- tVariantAssign(&p1->param[j], &pExpr->param[j]);
- }
-
taosArrayPush(dst, &p1);
} else {
taosArrayPush(dst, &pExpr);
@@ -1383,19 +2503,34 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco
return 0;
}
-bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) {
- // ignore the tbname columnIndex to be inserted into source list
- if (pColIndex->columnIndex < 0) {
- return false;
+int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) {
+ assert(src != NULL && dst != NULL);
+
+ size_t size = taosArrayGetSize(src);
+ for (int32_t i = 0; i < size; ++i) {
+ SExprInfo* pExpr = taosArrayGetP(src, i);
+
+ if (deepcopy) {
+ SExprInfo* p1 = calloc(1, sizeof(SExprInfo));
+ tscExprAssign(p1, pExpr);
+
+ taosArrayPush(dst, &p1);
+ } else {
+ taosArrayPush(dst, &pExpr);
+ }
}
-
+
+ return 0;
+}
+
+// the tbname columnIndex is ignored when inserting into the source list
+int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) {
size_t numOfCols = taosArrayGetSize(pColumnList);
- int16_t col = pColIndex->columnIndex;
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
- if ((pCol->colIndex.columnIndex != col) || (pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
+ if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) {
++i;
continue;
} else {
@@ -1404,28 +2539,44 @@ bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) {
}
if (i >= numOfCols || numOfCols == 0) {
- return false;
+ return -1;
}
- return true;
+ return i;
}
+void tscExprAssign(SExprInfo* dst, const SExprInfo* src) {
+ assert(dst != NULL && src != NULL);
+
+ *dst = *src;
-SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
+ if (src->base.flist.numOfFilters > 0) {
+ dst->base.flist.filterInfo = calloc(src->base.flist.numOfFilters, sizeof(SColumnFilterInfo));
+ memcpy(dst->base.flist.filterInfo, src->base.flist.filterInfo, sizeof(SColumnFilterInfo) * src->base.flist.numOfFilters);
+ }
+
+ dst->pExpr = exprdup(src->pExpr);
+
+ memset(dst->base.param, 0, sizeof(tVariant) * tListLen(dst->base.param));
+ for (int32_t j = 0; j < src->base.numOfParams; ++j) {
+ tVariantAssign(&dst->base.param[j], &src->base.param[j]);
+ }
+}
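// Copy-semantics sketch: tscExprAssign() starts from a shallow struct copy,
// then gives the destination its own filter array, its own expression tree
// (via exprdup()) and its own tVariant parameters, so destroying the source
// afterwards cannot leave dangling pointers inside the copy.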
+
+SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema) {
// ignore the tbname columnIndex to be inserted into source list
- if (pColIndex->columnIndex < 0) {
+ if (columnIndex < 0) {
return NULL;
}
-
+
size_t numOfCols = taosArrayGetSize(pColumnList);
- int16_t col = pColIndex->columnIndex;
int32_t i = 0;
while (i < numOfCols) {
SColumn* pCol = taosArrayGetP(pColumnList, i);
- if (pCol->colIndex.columnIndex < col) {
+ if (pCol->columnIndex < columnIndex) {
i++;
- } else if (pCol->colIndex.tableIndex < pColIndex->tableIndex) {
+ } else if (pCol->tableUid < uid) {
i++;
} else {
break;
@@ -1438,18 +2589,28 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return NULL;
}
- b->colIndex = *pColIndex;
+ b->columnIndex = columnIndex;
+ b->tableUid = uid;
+ b->info.colId = pSchema->colId;
+ b->info.bytes = pSchema->bytes;
+ b->info.type = pSchema->type;
+
taosArrayInsert(pColumnList, i, &b);
} else {
SColumn* pCol = taosArrayGetP(pColumnList, i);
-
- if (i < numOfCols && (pCol->colIndex.columnIndex > col || pCol->colIndex.tableIndex != pColIndex->tableIndex)) {
+
+ if (i < numOfCols && (pCol->columnIndex > columnIndex || pCol->tableUid != uid)) {
SColumn* b = calloc(1, sizeof(SColumn));
if (b == NULL) {
return NULL;
}
- b->colIndex = *pColIndex;
+ b->columnIndex = columnIndex;
+ b->tableUid = uid;
+ b->info.colId = pSchema->colId;
+ b->info.bytes = pSchema->bytes;
+ b->info.type = pSchema->type;
+
taosArrayInsert(pColumnList, i, &b);
}
}
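// Ordering sketch (illustrative): the column list is kept sorted by
// (columnIndex, tableUid), so inserting column indexes 3, 1, 2 for the same
// table uid always yields the list [1, 2, 3]; a pair that already exists
// falls through to the existing entry instead of being inserted twice.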
@@ -1461,34 +2622,59 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
SColumn* tscColumnClone(const SColumn* src) {
assert(src != NULL);
-
+
SColumn* dst = calloc(1, sizeof(SColumn));
if (dst == NULL) {
return NULL;
}
- dst->colIndex = src->colIndex;
- dst->numOfFilters = src->numOfFilters;
- dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters);
-
+ tscColumnCopy(dst, src);
return dst;
}
+static void tscColumnDestroy(SColumn* pCol) {
+ destroyFilterInfo(&pCol->info.flist);
+ free(pCol);
+}
+
+void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) {
+ destroyFilterInfo(&pDest->info.flist);
+
+ pDest->columnIndex = pSrc->columnIndex;
+ pDest->tableUid = pSrc->tableUid;
+ pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters;
+ pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters);
+ pDest->info.type = pSrc->info.type;
+ pDest->info.colId = pSrc->info.colId;
+ pDest->info.bytes = pSrc->info.bytes;
+}
-void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
+void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) {
assert(src != NULL && dst != NULL);
-
+
size_t num = taosArrayGetSize(src);
for (int32_t i = 0; i < num; ++i) {
SColumn* pCol = taosArrayGetP(src, i);
- if (pCol->colIndex.tableIndex == tableIndex || tableIndex < 0) {
+ if (pCol->tableUid == tableUid) {
SColumn* p = tscColumnClone(pCol);
taosArrayPush(dst, &p);
}
}
}
+void tscColumnListCopyAll(SArray* dst, const SArray* src) {
+ assert(src != NULL && dst != NULL);
+
+ size_t num = taosArrayGetSize(src);
+ for (int32_t i = 0; i < num; ++i) {
+ SColumn* pCol = taosArrayGetP(src, i);
+ SColumn* p = tscColumnClone(pCol);
+ taosArrayPush(dst, &p);
+ }
+}
+
+
void tscColumnListDestroy(SArray* pColumnList) {
if (pColumnList == NULL) {
return;
@@ -1527,7 +2713,7 @@ static int32_t validateQuoteToken(SStrToken* pToken) {
}
if (k != pToken->n || pToken->type != TK_ID) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return TSDB_CODE_SUCCESS;
}
@@ -1576,18 +2762,19 @@ void tscDequoteAndTrimToken(SStrToken* pToken) {
}
int32_t tscValidateName(SStrToken* pToken) {
- if (pToken->type != TK_STRING && pToken->type != TK_ID) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ if (pToken == NULL || pToken->z == NULL ||
+ (pToken->type != TK_STRING && pToken->type != TK_ID)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
char* sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
if (sep == NULL) { // single part
if (pToken->type == TK_STRING) {
-
+
tscDequoteAndTrimToken(pToken);
tscStrToLower(pToken->z, pToken->n);
//pToken->n = (uint32_t)strtrim(pToken->z);
-
+
int len = tGetToken(pToken->z, &pToken->type);
// single token, validate it
@@ -1596,14 +2783,14 @@ int32_t tscValidateName(SStrToken* pToken) {
} else {
sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
if (sep == NULL) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
return tscValidateName(pToken);
}
} else {
if (isNumber(pToken)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
} else { // two part
@@ -1616,15 +2803,15 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->n = tGetToken(pToken->z, &pToken->type);
if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pToken->type != TK_STRING && pToken->type != TK_ID) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t firstPartLen = pToken->n;
@@ -1633,13 +2820,13 @@ int32_t tscValidateName(SStrToken* pToken) {
pToken->n = (uint32_t)(oldLen - (sep - pStr) - 1);
int32_t len = tGetToken(pToken->z, &pToken->type);
if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) {
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
-
+
// re-build the whole name string
if (pStr[firstPartLen] == TS_PATH_DELIMITER[0]) {
      // the first part has no quotes; do nothing
@@ -1672,13 +2859,13 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
return false;
}
- if (colId == TSDB_TBNAME_COLUMN_INDEX || colId == TSDB_BLOCK_DIST_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
+ if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
return true;
}
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
+
int32_t numOfTotal = tinfo.numOfTags + tinfo.numOfColumns;
for (int32_t i = 0; i < numOfTotal; ++i) {
@@ -1704,7 +2891,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
dest->tbnameCond.len = src->tbnameCond.len;
dest->joinInfo.hasJoin = src->joinInfo.hasJoin;
-
+
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
if (src->joinInfo.joinTables[i]) {
dest->joinInfo.joinTables[i] = calloc(1, sizeof(SJoinNode));
@@ -1721,23 +2908,23 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
}
}
-
+
dest->relType = src->relType;
-
+
if (src->pCond == NULL) {
return 0;
}
-
+
size_t s = taosArrayGetSize(src->pCond);
dest->pCond = taosArrayInit(s, sizeof(SCond));
-
+
for (int32_t i = 0; i < s; ++i) {
SCond* pCond = taosArrayGet(src->pCond, i);
-
+
SCond c = {0};
c.len = pCond->len;
c.uid = pCond->uid;
-
+
if (pCond->len > 0) {
assert(pCond->cond != NULL);
c.cond = malloc(c.len);
@@ -1747,7 +2934,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
memcpy(c.cond, pCond->cond, c.len);
}
-
+
taosArrayPush(dest->pCond, &c);
}
@@ -1756,14 +2943,14 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
void tscTagCondRelease(STagCond* pTagCond) {
free(pTagCond->tbnameCond.cond);
-
+
if (pTagCond->pCond != NULL) {
size_t s = taosArrayGetSize(pTagCond->pCond);
for (int32_t i = 0; i < s; ++i) {
SCond* p = taosArrayGet(pTagCond->pCond, i);
tfree(p->cond);
}
-
+
taosArrayDestroy(pTagCond->pCond);
}
@@ -1790,19 +2977,19 @@ void tscTagCondRelease(STagCond* pTagCond) {
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
-
- size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
+
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
- pColInfo[i].functionId = pExpr->functionId;
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ pColInfo[i].functionId = pExpr->base.functionId;
- if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
+ if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) {
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
-
- int16_t index = pExpr->colInfo.colIndex;
+
+ int16_t index = pExpr->base.colInfo.colIndex;
pColInfo[i].type = (index != -1) ? pTagSchema[index].type : TSDB_DATA_TYPE_BINARY;
} else {
- pColInfo[i].type = pSchema[pExpr->colInfo.colIndex].type;
+ pColInfo[i].type = pSchema[pExpr->base.colInfo.colIndex].type;
}
}
}
@@ -1823,7 +3010,7 @@ bool tscShouldBeFreed(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return false;
}
-
+
STscObj* pTscObj = pSql->pTscObj;
if (pSql->pStream != NULL || pTscObj->hbrid == pSql->self || pSql->pSubscription != NULL) {
return false;
@@ -1845,14 +3032,9 @@ bool tscShouldBeFreed(SSqlObj* pSql) {
* @param tableIndex denote the table index for join query, where more than one table exists
* @return
*/
-STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd* pCmd, int32_t clauseIndex, int32_t tableIndex) {
- if (pCmd == NULL || pCmd->numOfClause == 0) {
- return NULL;
- }
-
- assert(clauseIndex >= 0 && clauseIndex < pCmd->numOfClause);
-
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
+STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd* pCmd, int32_t tableIndex) {
+ assert(pCmd != NULL);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
return tscGetMetaInfo(pQueryInfo, tableIndex);
}
@@ -1869,17 +3051,17 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) {
return pQueryInfo->pTableMetaInfo[tableIndex];
}
-SQueryInfo* tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
+SQueryInfo* tscGetQueryInfoS(SSqlCmd* pCmd) {
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
int32_t ret = TSDB_CODE_SUCCESS;
while ((pQueryInfo) == NULL) {
- if ((ret = tscAddSubqueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
+ if ((ret = tscAddQueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
}
- pQueryInfo = tscGetQueryInfoDetail(pCmd, subClauseIndex);
+ pQueryInfo = tscGetQueryInfo(pCmd);
}
return pQueryInfo;
@@ -1903,45 +3085,49 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i
return tscGetMetaInfo(pQueryInfo, k);
}
+// todo refactor
void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
assert(pQueryInfo->fieldsInfo.internalField == NULL);
pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField));
-
+
assert(pQueryInfo->exprList == NULL);
- pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
- pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
- pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
- pQueryInfo->resColumnId = -1000;
- pQueryInfo->limit.limit = -1;
- pQueryInfo->limit.offset = 0;
- pQueryInfo->slimit.limit = -1;
- pQueryInfo->slimit.offset = 0;
+ pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
+ pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
+ pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
+ pQueryInfo->limit.limit = -1;
+ pQueryInfo->limit.offset = 0;
+
+ pQueryInfo->slimit.limit = -1;
+ pQueryInfo->slimit.offset = 0;
+ pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES);
+ pQueryInfo->window = TSWINDOW_INITIALIZER;
+ pQueryInfo->multigroupResult = true;
}
-int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
+int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
assert(pCmd != NULL);
-
- // todo refactor: remove this structure
- size_t s = pCmd->numOfClause + 1;
- char* tmp = realloc(pCmd->pQueryInfo, s * POINTER_BYTES);
- if (tmp == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
-
- pCmd->pQueryInfo = (SQueryInfo**)tmp;
-
SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo));
+
if (pQueryInfo == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tscInitQueryInfo(pQueryInfo);
-
- pQueryInfo->window = TSWINDOW_INITIALIZER;
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
- pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
+ if (pCmd->pQueryInfo == NULL) {
+ pCmd->pQueryInfo = pQueryInfo;
+ } else {
+ SQueryInfo* p = pCmd->pQueryInfo;
+ while(p->sibling != NULL) {
+ p = p->sibling;
+ }
+
+ p->sibling = pQueryInfo;
+ }
+
+ pCmd->active = pQueryInfo;
return TSDB_CODE_SUCCESS;
}
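// Sibling-chain sketch: pCmd->pQueryInfo is now the head of a singly linked
// list rather than a realloc()ed array. Appending walks to the tail and
// marks the new node active:
//
//   head -> q1 -> q2 -> qNew        (pCmd->active = qNew)
//
// tscClearSubqueryInfo() below releases the same chain by following each
// node's sibling pointer.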
@@ -1949,9 +3135,14 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
tscTagCondRelease(&pQueryInfo->tagCond);
tscFieldInfoClear(&pQueryInfo->fieldsInfo);
- tscSqlExprInfoDestroy(pQueryInfo->exprList);
+ tscExprDestroy(pQueryInfo->exprList);
pQueryInfo->exprList = NULL;
+ if (pQueryInfo->exprList1 != NULL) {
+ tscExprDestroy(pQueryInfo->exprList1);
+ pQueryInfo->exprList1 = NULL;
+ }
+
tscColumnListDestroy(pQueryInfo->colList);
pQueryInfo->colList = NULL;
@@ -1960,19 +3151,127 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
pQueryInfo->groupbyExpr.columnInfo = NULL;
pQueryInfo->groupbyExpr.numOfGroupCols = 0;
}
-
+
pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
tfree(pQueryInfo->fillVal);
pQueryInfo->fillType = 0;
tfree(pQueryInfo->buf);
+
+ taosArrayDestroy(pQueryInfo->pUpstream);
+ pQueryInfo->pUpstream = NULL;
}
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
- for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ while (pQueryInfo != NULL) {
+ SQueryInfo* p = pQueryInfo->sibling;
freeQueryInfoImpl(pQueryInfo);
+ pQueryInfo = p;
+ }
+}
+
+int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
+ assert(pQueryInfo != NULL && pSrc != NULL);
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ memcpy(&pQueryInfo->interval, &pSrc->interval, sizeof(pQueryInfo->interval));
+
+ pQueryInfo->command = pSrc->command;
+ pQueryInfo->type = pSrc->type;
+ pQueryInfo->window = pSrc->window;
+ pQueryInfo->limit = pSrc->limit;
+ pQueryInfo->slimit = pSrc->slimit;
+ pQueryInfo->order = pSrc->order;
+ pQueryInfo->vgroupLimit = pSrc->vgroupLimit;
+ pQueryInfo->tsBuf = NULL;
+ pQueryInfo->fillType = pSrc->fillType;
+ pQueryInfo->fillVal = NULL;
+ pQueryInfo->numOfFillVal = 0;
+ pQueryInfo->clauseLimit = pSrc->clauseLimit;
+ pQueryInfo->prjOffset = pSrc->prjOffset;
+ pQueryInfo->numOfTables = 0;
+ pQueryInfo->sessionWindow = pSrc->sessionWindow;
+ pQueryInfo->pTableMetaInfo = NULL;
+ pQueryInfo->multigroupResult = pSrc->multigroupResult;
+
+ pQueryInfo->bufLen = pSrc->bufLen;
+ pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
+ pQueryInfo->arithmeticOnAgg = pSrc->arithmeticOnAgg;
+ pQueryInfo->buf = malloc(pSrc->bufLen);
+ if (pQueryInfo->buf == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ if (pSrc->bufLen > 0) {
+ memcpy(pQueryInfo->buf, pSrc->buf, pSrc->bufLen);
+ }
+
+ pQueryInfo->groupbyExpr = pSrc->groupbyExpr;
+ if (pSrc->groupbyExpr.columnInfo != NULL) {
+ pQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pSrc->groupbyExpr.columnInfo);
+ if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+ }
+
+ if (tscTagCondCopy(&pQueryInfo->tagCond, &pSrc->tagCond) != 0) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ if (pSrc->fillType != TSDB_FILL_NONE) {
+ pQueryInfo->fillVal = calloc(1, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
+ if (pQueryInfo->fillVal == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+ pQueryInfo->numOfFillVal = pSrc->fieldsInfo.numOfOutput;
+
+ memcpy(pQueryInfo->fillVal, pSrc->fillVal, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t));
+ }
+
+ if (tscExprCopyAll(pQueryInfo->exprList, pSrc->exprList, true) != 0) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ if (pQueryInfo->arithmeticOnAgg) {
+ pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES);
+ if (tscExprCopyAll(pQueryInfo->exprList1, pSrc->exprList1, true) != 0) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
}
+
+ tscColumnListCopyAll(pQueryInfo->colList, pSrc->colList);
+ tscFieldInfoCopy(&pQueryInfo->fieldsInfo, &pSrc->fieldsInfo, pQueryInfo->exprList);
+
+ for(int32_t i = 0; i < pSrc->numOfTables; ++i) {
+ STableMetaInfo* p1 = tscGetMetaInfo((SQueryInfo*) pSrc, i);
+
+ STableMeta* pMeta = tscTableMetaDup(p1->pTableMeta);
+ if (pMeta == NULL) {
+ // todo handle the error
+ }
+
+ tscAddTableMetaInfo(pQueryInfo, &p1->name, pMeta, p1->vgroupList, p1->tagColList, p1->pVgroupTables);
+ }
+
+ SArray *pUdfInfo = NULL;
+ if (pSrc->pUdfInfo) {
+ pUdfInfo = taosArrayDup(pSrc->pUdfInfo);
+ }
+
+ pQueryInfo->pUdfInfo = pUdfInfo;
+ pQueryInfo->udfCopy = true;
+
+ _error:
+ return code;
}
void tscFreeVgroupTableInfo(SArray* pVgroupTables) {
@@ -2017,7 +3316,9 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
- info->itemList = taosArrayDup(pInfo->itemList);
+ if (pInfo->itemList) {
+ info->itemList = taosArrayDup(pInfo->itemList);
+ }
}
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
@@ -2043,31 +3344,32 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
- if (removeMeta) {
+ if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
-
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
-
+
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
tscClearTableMetaInfo(pTableMetaInfo);
+
free(pTableMetaInfo);
}
-
+
tfree(pQueryInfo->pTableMetaInfo);
}
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
- void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
- if (pAlloc == NULL) {
+ void* tmp = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
+ if (tmp == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
}
- pQueryInfo->pTableMetaInfo = pAlloc;
+ pQueryInfo->pTableMetaInfo = tmp;
STableMetaInfo* pTableMetaInfo = calloc(1, sizeof(STableMetaInfo));
+
if (pTableMetaInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
@@ -2081,11 +3383,13 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
pTableMetaInfo->pTableMeta = pTableMeta;
if (pTableMetaInfo->pTableMeta == NULL) {
- pTableMetaInfo->tableMetaSize = 0;
+ pTableMetaInfo->tableMetaSize = 0;
} else {
pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
}
+ pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
+
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
}
@@ -2096,12 +3400,12 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
return NULL;
}
- if (pTagCols != NULL) {
- tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
+ if (pTagCols != NULL && pTableMetaInfo->pTableMeta != NULL) {
+ tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, pTableMetaInfo->pTableMeta->id.uid);
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
-
+
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
}
@@ -2129,6 +3433,16 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->row = 0;
pRes->numOfRows = 0;
+ pRes->dataConverted = false;
+}
+
+void tscInitResForMerge(SSqlRes* pRes) {
+ pRes->qId = 1; // hack to pass the safety check in fetch_row function
+ pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
+ tscResetForNextRetrieve(pRes);
+
+ assert(pRes->pMerger != NULL);
+ pRes->data = pRes->pMerger->buf;
}
void registerSqlObj(SSqlObj* pSql) {
@@ -2149,24 +3463,13 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
+ pNew->rootObj = pSql->rootObj;
SSqlCmd* pCmd = &pNew->cmd;
pCmd->command = cmd;
- pCmd->parseFinished = 1;
- pCmd->autoCreated = pSql->cmd.autoCreated;
-
- int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
- if (code != TSDB_CODE_SUCCESS) {
- tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
- free(pNew);
- return NULL;
- }
+ tsem_init(&pNew->rspSem, 0 ,0);
- if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
-#ifdef __APPLE__
- // to satisfy later tsem_destroy in taos_free_result
- tsem_init(&pNew->rspSem, 0, 0);
-#endif // __APPLE__
+ if (tscAddQueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pNew);
return NULL;
}
@@ -2177,10 +3480,8 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
pNew->sqlstr = NULL;
pNew->maxRetry = TSDB_MAX_REPLICA;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0);
-
- assert(pSql->cmd.clauseIndex == 0);
- STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
+ SQueryInfo* pQueryInfo = tscGetQueryInfoS(pCmd);
+ STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
@@ -2189,19 +3490,18 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
}
static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t uid) {
- int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo);
+ int32_t numOfOutput = (int32_t)tscNumOfExprs(pNewQueryInfo);
if (numOfOutput == 0) {
return;
}
// set the field info in pNewQueryInfo object according to sqlExpr information
- size_t numOfExprs = tscSqlExprNumOfExprs(pNewQueryInfo);
- for (int32_t i = 0; i < numOfExprs; ++i) {
- SSqlExpr* pExpr = tscSqlExprGet(pNewQueryInfo, i);
+ for (int32_t i = 0; i < numOfOutput; ++i) {
+ SExprInfo* pExpr = tscExprGet(pNewQueryInfo, i);
- TAOS_FIELD f = tscCreateField((int8_t) pExpr->resType, pExpr->aliasName, pExpr->resBytes);
+ TAOS_FIELD f = tscCreateField((int8_t) pExpr->base.resType, pExpr->base.aliasName, pExpr->base.resBytes);
SInternalField* pInfo1 = tscFieldInfoAppend(&pNewQueryInfo->fieldsInfo, &f);
- pInfo1->pSqlExpr = pExpr;
+ pInfo1->pExpr = pExpr;
}
// update the pSqlExpr pointer in SInternalField according to the field name
@@ -2210,12 +3510,12 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t ui
TAOS_FIELD* field = tscFieldInfoGetField(&pNewQueryInfo->fieldsInfo, f);
bool matched = false;
- for (int32_t k1 = 0; k1 < numOfExprs; ++k1) {
- SSqlExpr* pExpr1 = tscSqlExprGet(pNewQueryInfo, k1);
+ for (int32_t k1 = 0; k1 < numOfOutput; ++k1) {
+ SExprInfo* pExpr1 = tscExprGet(pNewQueryInfo, k1);
- if (strcmp(field->name, pExpr1->aliasName) == 0) { // establish link according to the result field name
+ if (strcmp(field->name, pExpr1->base.aliasName) == 0) { // establish link according to the result field name
SInternalField* pInfo = tscFieldInfoGetInternalField(&pNewQueryInfo->fieldsInfo, f);
- pInfo->pSqlExpr = pExpr1;
+ pInfo->pExpr = pExpr1;
matched = true;
break;
@@ -2238,41 +3538,49 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL;
}
-
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, tableIndex);
+
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
+ STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[tableIndex];
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
- pNew->sqlstr = strdup(pSql->sqlstr);
+ pNew->sqlstr = strdup(pSql->sqlstr);
+ pNew->rootObj = pSql->rootObj;
+ tsem_init(&pNew->rspSem, 0, 0);
SSqlCmd* pnCmd = &pNew->cmd;
memcpy(pnCmd, pCmd, sizeof(SSqlCmd));
-
+
pnCmd->command = cmd;
pnCmd->payload = NULL;
pnCmd->allocSize = 0;
+ pnCmd->pTableMetaMap = NULL;
+
+ pnCmd->pQueryInfo = NULL;
+ pnCmd->insertParam.pDataBlocks = NULL;
- pnCmd->pQueryInfo = NULL;
- pnCmd->numOfClause = 0;
- pnCmd->clauseIndex = 0;
- pnCmd->pDataBlocks = NULL;
+ pnCmd->insertParam.numOfTables = 0;
+ pnCmd->insertParam.pTableNameList = NULL;
+ pnCmd->insertParam.pTableBlockHashList = NULL;
+ pnCmd->insertParam.objectId = pNew->self;
- pnCmd->numOfTables = 0;
- pnCmd->parseFinished = 1;
- pnCmd->pTableNameList = NULL;
- pnCmd->pTableBlockHashList = NULL;
- pnCmd->tagData.data = NULL;
- pnCmd->tagData.dataLen = 0;
+ memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData));
- if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
+ if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
- SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pnCmd, 0);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pnCmd);
+
+ if (pQueryInfo->pUdfInfo) {
+ pNewQueryInfo->pUdfInfo = taosArrayDup(pQueryInfo->pUdfInfo);
+ pNewQueryInfo->udfCopy = true;
+ }
pNewQueryInfo->command = pQueryInfo->command;
+ pnCmd->active = pNewQueryInfo;
+
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
@@ -2283,12 +3591,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->tsBuf = NULL;
pNewQueryInfo->fillType = pQueryInfo->fillType;
pNewQueryInfo->fillVal = NULL;
+ pNewQueryInfo->numOfFillVal = 0;
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
+ pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
-
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
+ pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult;
+
+ pNewQueryInfo->distinct = pQueryInfo->distinct;
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@@ -2306,18 +3618,21 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
goto _error;
}
}
-
+
if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
- pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
+ // just to make the memory sanitizer happy
+ // refactor later
+ pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
if (pNewQueryInfo->fillVal == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
+ pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput;
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
}
@@ -2327,19 +3642,19 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
-
- tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, (int16_t)tableIndex);
+
+ uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
+ tscColumnListCopy(pNewQueryInfo->colList, pQueryInfo->colList, uid);
// set the correct query type
if (pPrevSql != NULL) {
- SQueryInfo* pPrevQueryInfo = tscGetQueryInfoDetail(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex);
+ SQueryInfo* pPrevQueryInfo = tscGetQueryInfo(&pPrevSql->cmd);
pNewQueryInfo->type = pPrevQueryInfo->type;
} else {
TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY);// it must be the subquery
}
- uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
- if (tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true) != 0) {
+ if (tscExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
@@ -2359,14 +3674,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
-
+
} else { // transfer the ownership of pTableMeta to the newly created sql object.
- STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
+ STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
terrno = TSDB_CODE_TSC_APP_ERROR;
goto _error;
}
-
+
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
@@ -2386,29 +3701,29 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
goto _error;
}
-
+
assert(pNewQueryInfo->numOfTables == 1);
-
+
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pFinalInfo->vgroupList != NULL);
}
+ registerSqlObj(pNew);
+
if (cmd == TSDB_SQL_SELECT) {
size_t size = taosArrayGetSize(pNewQueryInfo->colList);
-
- tscDebug(
- "%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
+
+ tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
- pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
+ pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
-
- tscPrintSelectClause(pNew, 0);
+
+ tscPrintSelNodeList(pNew, 0);
} else {
tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex);
}
- registerSqlObj(pNew);
return pNew;
_error:
@@ -2416,69 +3731,254 @@ _error:
return NULL;
}
-/**
- * To decide if current is a two-stage super table query, join query, or insert. And invoke different
- * procedure accordingly
- * @param pSql
- */
-void tscDoQuery(SSqlObj* pSql) {
- SSqlCmd* pCmd = &pSql->cmd;
- SSqlRes* pRes = &pSql->res;
-
- pRes->code = TSDB_CODE_SUCCESS;
-
- if (pCmd->command > TSDB_SQL_LOCAL) {
+void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
+ uint16_t type = pQueryInfo->type;
+ if (QUERY_IS_JOIN_QUERY(type) && !TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) {
+ tscHandleMasterJoinQuery(pSql);
+ } else if (tscMultiRoundQuery(pQueryInfo, 0) && pQueryInfo->round == 0) {
+ tscHandleFirstRoundStableQuery(pSql); // todo lock?
+ } else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
+ tscLockByThread(&pSql->squeryLock);
+ tscHandleMasterSTableQuery(pSql);
+ tscUnlockByThread(&pSql->squeryLock);
+ } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
+ if (TSDB_QUERY_HAS_TYPE(pSql->cmd.insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
+ tscImportDataFromFile(pSql);
+ } else {
+ tscHandleMultivnodeInsert(pSql);
+ }
+ } else if (pSql->cmd.command > TSDB_SQL_LOCAL) {
tscProcessLocalCmd(pSql);
+ } else { // send request to server directly
+ tscBuildAndSendRequest(pSql, pQueryInfo);
+ }
+}
+
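+// scheduled task: merge the finished subquery results through the downstream operator and deliver them to the user callback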
+void doRetrieveSubqueryData(SSchedMsg *pMsg) {
+ SSqlObj* pSql = (SSqlObj*) pMsg->ahandle;
+ if (pSql == NULL || pSql->signature != pSql) {
+ tscDebug("%p SqlObj is freed, not add into queue async res", pMsg->ahandle);
return;
}
-
- if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
- tscImportDataFromFile(pSql);
+
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+ handleDownstreamOperator(pSql->pSubs, pSql->subState.numOfSub, pQueryInfo, pSql);
+
+ pSql->res.qId = -1;
+ if (pSql->res.code == TSDB_CODE_SUCCESS) {
+ (*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows);
} else {
- SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
- uint16_t type = pQueryInfo->type;
+ tscAsyncResultOnError(pSql);
+ }
+}
- if ((pCmd->command == TSDB_SQL_SELECT) && (((!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) &&
- (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_STABLE_SUBQUERY))) ||
- (tscIsProjectionQuery(pQueryInfo)))) {
- tscAddIntoSqlList(pSql);
- }
-
- if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_INSERT)) { // multi-vnodes insertion
- tscHandleMultivnodeInsert(pSql);
+// NOTE: a blocking query cannot be executed in the rpc message handler thread
+static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) {
+ // hand the completed subquery over to the downstream processing
+ SRetrieveSupport* ps = param;
+ SSqlObj* pParentSql = ps->pParentSql;
+ SSqlObj* pSql = tres;
+
+ int32_t index = ps->subqueryIndex;
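+ // mark this subquery as done; subAndCheckDone() returns true only when all sibling subqueries have completed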
+ bool ret = subAndCheckDone(pSql, pParentSql, index);
+
+ // TODO refactor
+ tfree(ps);
+ pSql->param = NULL;
+
+ if (!ret) {
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
+ return;
+ }
+
+ pParentSql->cmd.active = pParentSql->cmd.pQueryInfo;
+ pParentSql->res.qId = -1;
+ if (pSql->res.code == TSDB_CODE_SUCCESS) {
+ (*pSql->fp)(pParentSql->param, pParentSql, pParentSql->res.numOfRows);
+ } else {
+ pParentSql->res.code = pSql->res.code;
+ tscAsyncResultOnError(pParentSql);
+ }
+}
+
+static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
+ SSqlObj* pSql = tres;
+ SRetrieveSupport* ps = param;
+
+ if (pSql->res.code != TSDB_CODE_SUCCESS) {
+ SSqlObj* pParentSql = ps->pParentSql;
+
+ int32_t index = ps->subqueryIndex;
+ bool ret = subAndCheckDone(pSql, pParentSql, index);
+
+ tscFreeRetrieveSup(pSql);
+
+ if (!ret) {
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
return;
}
-
- if (QUERY_IS_JOIN_QUERY(type)) {
- if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) {
- tscHandleMasterJoinQuery(pSql);
- } else { // for first stage sub query, iterate all vnodes to get all timestamp
- if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
- tscProcessSql(pSql);
- } else { // secondary stage join query.
- if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
- tscLockByThread(&pSql->squeryLock);
- tscHandleMasterSTableQuery(pSql);
- tscUnlockByThread(&pSql->squeryLock);
- } else {
- tscProcessSql(pSql);
- }
- }
- }
+ // todo refactor
+ tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
+
+ SSqlObj *rootObj = pParentSql->rootObj;
+
+ if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && rootObj->retry < rootObj->maxRetry)) {
+ pParentSql->res.code = code;
+
+ tscAsyncResultOnError(pParentSql);
return;
- } else if (tscMultiRoundQuery(pQueryInfo, 0) && pQueryInfo->round == 0) {
- tscHandleFirstRoundStableQuery(pSql); // todo lock?
+ }
+
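+ // the error is recoverable (stale table/vgroup id with retry budget left): drop all subqueries and retry the whole statement from the root object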
+ tscFreeSubobj(pParentSql);
+ tfree(pParentSql->pSubs);
+
+ tscFreeSubobj(rootObj);
+ tfree(rootObj->pSubs);
+
+ rootObj->res.code = TSDB_CODE_SUCCESS;
+ rootObj->retry++;
+
+ tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", rootObj->self,
+ tstrerror(code), rootObj->retry);
+
+ tscResetSqlCmd(&rootObj->cmd, true);
+
+ code = tsParseSql(rootObj, true);
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
- } else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
- tscLockByThread(&pSql->squeryLock);
- tscHandleMasterSTableQuery(pSql);
- tscUnlockByThread(&pSql->squeryLock);
+ }
+
+ if (code != TSDB_CODE_SUCCESS) {
+ rootObj->res.code = code;
+ tscAsyncResultOnError(rootObj);
return;
}
+
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&rootObj->cmd);
+
+ executeQuery(rootObj, pQueryInfo);
+ return;
+ }
+
+ if (pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
+ SSqlObj* pParentSql = ps->pParentSql;
+
+ pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
- tscProcessSql(pSql);
+ (*pParentSql->fp)(pParentSql->param, pParentSql, 0);
+ return;
+ }
+
+ taos_fetch_rows_a(tres, tscSubqueryRetrieveCallback, param);
+}
+
+// execute the query according to its execution plan
+void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t numOfInit = 0;
+
+ if (pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
+ (*pSql->fp)(pSql->param, pSql, 0);
+ return;
+ }
+
+ if (pSql->cmd.command == TSDB_SQL_SELECT) {
+ tscAddIntoSqlList(pSql);
+ }
+
+ if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nested query: execute the upstream subqueries first
+ assert(pSql->subState.numOfSub == 0);
+ pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
+ assert(pSql->pSubs == NULL);
+ pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
+ assert(pSql->subState.states == NULL);
+ pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
+ code = pthread_mutex_init(&pSql->subState.mutex, NULL);
+
+ if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != TSDB_CODE_SUCCESS) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
+ SQueryInfo* pSub = taosArrayGetP(pQueryInfo->pUpstream, i);
+
+ pSql->cmd.active = pSub;
+ pSql->cmd.command = TSDB_SQL_SELECT;
+
+ SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
+ if (pNew == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto _error;
+ }
+
+ pNew->pTscObj = pSql->pTscObj;
+ pNew->signature = pNew;
+ pNew->sqlstr = strdup(pSql->sqlstr);
+ pNew->fp = tscSubqueryCompleteCallback;
+ pNew->fetchFp = tscSubqueryCompleteCallback;
+ pNew->maxRetry = pSql->maxRetry;
+ pNew->rootObj = pSql->rootObj;
+
+ pNew->cmd.resColumnId = TSDB_RES_COL_ID;
+
+ tsem_init(&pNew->rspSem, 0, 0);
+
+ SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
+ if (ps == NULL) {
+ tscFreeSqlObj(pNew);
+ goto _error;
+ }
+
+ ps->pParentSql = pSql;
+ ps->subqueryIndex = i;
+
+ pNew->param = ps;
+ pSql->pSubs[i] = pNew;
+
+ SSqlCmd* pCmd = &pNew->cmd;
+ pCmd->command = TSDB_SQL_SELECT;
+ if ((code = tscAddQueryInfo(pCmd)) != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pCmd);
+ tscQueryInfoCopy(pNewQueryInfo, pSub);
+
+ TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY);
+ numOfInit++;
+ }
+
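+ // all sub objects were created successfully; register and launch them in a second pass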
+ for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
+ SSqlObj* psub = pSql->pSubs[i];
+ registerSqlObj(psub);
+
+ // recursively execute each subquery according to its own plan
+ SQueryInfo* pq = tscGetQueryInfo(&psub->cmd);
+ executeQuery(psub, pq);
+ }
+
+ return;
+ }
+
+ pSql->cmd.active = pQueryInfo;
+ doExecuteQuery(pSql, pQueryInfo);
+ return;
+
+ _error:
+ for(int32_t i = 0; i < numOfInit; ++i) {
+ SSqlObj* p = pSql->pSubs[i];
+ tscFreeSqlObj(p);
}
+
+ pSql->res.code = code;
+ pSql->subState.numOfSub = 0; // so that uninitialized subquery objects are not freed
+ tfree(pSql->subState.states);
+ tfree(pSql->pSubs);
+ tscAsyncResultOnError(pSql);
}
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) {
@@ -2536,17 +4036,15 @@ bool tscIsQueryWithLimit(SSqlObj* pSql) {
}
SSqlCmd* pCmd = &pSql->cmd;
- for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
- SQueryInfo* pqi = tscGetQueryInfoDetailSafely(pCmd, i);
- if (pqi == NULL) {
- continue;
- }
-
+ SQueryInfo* pqi = tscGetQueryInfo(pCmd);
+ while(pqi != NULL) {
if (pqi->limit.limit > 0) {
return true;
}
+
+ pqi = pqi->sibling;
}
-
+
return false;
}
@@ -2556,7 +4054,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
const char* msgFormat3 = "%s";
- const char* prefix = "syntax error";
+ const char* prefix = "syntax error";
const int32_t BACKWARD_CHAR_STEP = 0;
if (sql == NULL) {
@@ -2571,24 +4069,24 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
if (additionalInfo != NULL) {
sprintf(msg, msgFormat2, buf, additionalInfo);
} else {
- const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1;
+ const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1;
sprintf(msg, msgFormat, buf);
}
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
-int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
- const char* msgFormat1 = "invalid SQL: %s";
- const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
- const char* msgFormat3 = "invalid SQL: \'%s\'";
+int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char* sql) {
+ const char* msgFormat1 = "invalid operation: %s";
+ const char* msgFormat2 = "invalid operation: \'%s\' (%s)";
+ const char* msgFormat3 = "invalid operation: \'%s\'";
const int32_t BACKWARD_CHAR_STEP = 0;
if (sql == NULL) {
assert(additionalInfo != NULL);
sprintf(msg, msgFormat1, additionalInfo);
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
char buf[64] = {0}; // only extract part of sql string
@@ -2600,7 +4098,32 @@ int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* s
sprintf(msg, msgFormat3, buf); // no additional information for invalid sql error
}
- return TSDB_CODE_TSC_INVALID_SQL;
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+}
+
+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql) {
+ const char* msgFormat1 = "%s:%s";
+ const char* msgFormat2 = "%s:\'%s\' (%s)";
+ const char* msgFormat3 = "%s:\'%s\'";
+
+ const int32_t BACKWARD_CHAR_STEP = 0;
+
+ if (sql == NULL) {
+ assert(errMsg != NULL);
+ sprintf(dstBuffer, msgFormat1, tstrerror(code), errMsg);
+ return code;
+ }
+
+ char buf[64] = {0}; // only extract part of sql string
+ strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);
+
+ if (errMsg != NULL) {
+ sprintf(dstBuffer, msgFormat2, tstrerror(code), buf, errMsg);
+ } else {
+ sprintf(dstBuffer, msgFormat3, tstrerror(code), buf); // no additional information for invalid sql error
+ }
+
+ return code;
}
bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
@@ -2622,14 +4145,14 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) {
}
assert(pRes->completed);
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// for normal table, no need to try any more if results are all retrieved from one vnode
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || (pTableMetaInfo->vgroupList == NULL)) {
return false;
}
-
+
int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
if (pTableMetaInfo->pVgroupTables != NULL) {
numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
@@ -2640,14 +4163,15 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) {
}
bool hasMoreClauseToTry(SSqlObj* pSql) {
- return pSql->cmd.clauseIndex < pSql->cmd.numOfClause - 1;
+ SSqlCmd* pCmd = &pSql->cmd;
+ return pCmd->active->sibling != NULL;
}
void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
/*
* no result returned from the current virtual node anymore, try the next vnode if exists
@@ -2655,7 +4179,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
*/
assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes));
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
+
int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
if (++pTableMetaInfo->vgroupIndex < totalVgroups) {
tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next:%d. total vgroups:%d. current numOfRes:%" PRId64, pSql->self,
@@ -2676,7 +4200,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
pQueryInfo->limit.offset = pRes->offset;
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
-
+
tscDebug("0x%"PRIx64" new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64,
pSql->self, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit);
@@ -2693,7 +4217,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
// set the callback function
pSql->fp = fp;
- tscProcessSql(pSql);
+ tscBuildAndSendRequest(pSql, NULL);
} else {
tscDebug("0x%"PRIx64" try all %d vnodes, query complete. current numOfRes:%" PRId64, pSql->self, totalVgroups, pRes->numOfClauseTotal);
}
@@ -2703,35 +4227,35 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
- // current subclause is completed, try the next subclause
- assert(pCmd->clauseIndex < pCmd->numOfClause - 1);
-
- pCmd->clauseIndex++;
- SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
+ SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
pSql->cmd.command = pQueryInfo->command;
//backup the total number of result first
int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal;
-
// DON'T free final since it may be recorded and used later by the app
TAOS_FIELD* finalBk = pRes->final;
pRes->final = NULL;
tscFreeSqlResult(pSql);
+
pRes->final = finalBk;
-
pRes->numOfTotal = num;
-
+
+ for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
+ taos_free_result(pSql->pSubs[i]);
+ }
+
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
+
pSql->fp = fp;
- tscDebug("0x%"PRIx64" try data in the next subclause:%d, total subclause:%d", pSql->self, pCmd->clauseIndex, pCmd->numOfClause);
+ tscDebug("0x%"PRIx64" try data in the next subclause", pSql->self);
if (pCmd->command > TSDB_SQL_LOCAL) {
tscProcessLocalCmd(pSql);
} else {
- tscDoQuery(pSql);
+ executeQuery(pSql, pQueryInfo);
}
}
@@ -2761,7 +4285,7 @@ char* strdup_throw(const char* str) {
int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corMgmtEpSet) {
corMgmtEpSet->version = 0;
- // init mgmt ip set
+ // init mgmt ip set
SRpcEpSet *mgmtEpSet = &(corMgmtEpSet->epSet);
mgmtEpSet->numOfEps = 0;
mgmtEpSet->inUse = 0;
@@ -2933,10 +4457,10 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
int32_t totalCols = 0;
- if (pTableMeta->tableInfo.numOfColumns >= 0 && pTableMeta->tableInfo.numOfTags >= 0) {
+ if (pTableMeta->tableInfo.numOfColumns >= 0) {
totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
}
-
+
return sizeof(STableMeta) + totalCols * sizeof(SSchema);
}
@@ -2954,25 +4478,43 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
return cMeta;
}
-int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) {
- assert(pChild != NULL && buf != NULL);
+int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) {
+ assert(*ppChild != NULL);
+
+ STableMeta* p = NULL;
+ size_t sz = 0;
+ STableMeta* pChild = *ppChild;
+ STableMeta* pChild1;
- STableMeta* p = buf;
- taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
+ if(NULL == taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz)) {
+ tfree(p);
+ }
// tableMeta exists, build child table meta according to the super table meta
// the uid needs to be checked in addition to the general name of the super table.
- if (p->id.uid > 0 && pChild->suid == p->id.uid) {
+ if (p && p->id.uid > 0 && pChild->suid == p->id.uid) {
+
+ int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
+ int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
+ if (*tableMetaCapacity < tableMetaSize) {
+ pChild1 = realloc(pChild, tableMetaSize);
+ if(pChild1 == NULL)
+ return -1;
+ pChild = pChild1;
+ *tableMetaCapacity = (size_t)tableMetaSize;
+ }
+
pChild->sversion = p->sversion;
pChild->tversion = p->tversion;
+ memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableComInfo));
+ memcpy(pChild->schema, p->schema, totalBytes);
- memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableInfo));
- int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
-
- memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
+ *ppChild = pChild;
+ tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
- taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ tfree(p);
+ taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
return -1;
}
}
@@ -2983,26 +4525,551 @@ uint32_t tscGetTableMetaMaxSize() {
STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
- uint32_t size = tscGetTableMetaSize(pTableMeta);
- STableMeta* p = calloc(1, size);
+ size_t size = tscGetTableMetaSize(pTableMeta);
+
+ STableMeta* p = malloc(size);
memcpy(p, pTableMeta, size);
return p;
}
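+// deep copy of a SVgroupsInfo block, duplicating every per-vgroup entry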
+SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) {
+ assert(pVgroupsInfo != NULL);
+
+ size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo);
+ SVgroupsInfo* pInfo = calloc(1, size);
+ pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups;
+ for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) {
+ tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]);
+ }
+ return pInfo;
+}
+
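+// build projection expressions for the output fields when arithmetic expressions are applied on top of aggregate results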
+int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, SExprInfo*** pExpr, int32_t* num) {
+ if (!pQueryInfo->arithmeticOnAgg) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ *num = tscNumOfFields(pQueryInfo);
+ *pExpr = calloc(*(num), POINTER_BYTES);
+ if ((*pExpr) == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < (*num); ++i) {
+ SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
+ SExprInfo* pSource = pField->pExpr;
+
+ SExprInfo* px = calloc(1, sizeof(SExprInfo));
+ (*pExpr)[i] = px;
+
+ SSqlExpr *pse = &px->base;
+ pse->uid = pTableMetaInfo->pTableMeta->id.uid;
+ pse->resColId = pSource->base.resColId;
+ strncpy(pse->aliasName, pSource->base.aliasName, tListLen(pse->aliasName));
+ strncpy(pse->token, pSource->base.token, tListLen(pse->token));
+
+ if (pSource->base.functionId != TSDB_FUNC_ARITHM) { // this should be switched to projection query
+ pse->numOfParams = 0; // no params for projection query
+ pse->functionId = TSDB_FUNC_PRJ;
+ pse->colInfo.colId = pSource->base.resColId;
+
+ int32_t numOfOutput = (int32_t) taosArrayGetSize(pQueryInfo->exprList);
+ for (int32_t j = 0; j < numOfOutput; ++j) {
+ SExprInfo* p = taosArrayGetP(pQueryInfo->exprList, j);
+ if (p->base.resColId == pse->colInfo.colId) {
+ pse->colInfo.colIndex = j;
+ break;
+ }
+ }
+
+ pse->colInfo.flag = TSDB_COL_NORMAL;
+ pse->resType = pSource->base.resType;
+ pse->resBytes = pSource->base.resBytes;
+ strncpy(pse->colInfo.name, pSource->base.aliasName, tListLen(pse->colInfo.name));
+
+ // TODO restore refactor
+ int32_t functionId = pSource->base.functionId;
+ if (pSource->base.functionId == TSDB_FUNC_FIRST_DST) {
+ functionId = TSDB_FUNC_FIRST;
+ } else if (pSource->base.functionId == TSDB_FUNC_LAST_DST) {
+ functionId = TSDB_FUNC_LAST;
+ } else if (pSource->base.functionId == TSDB_FUNC_STDDEV_DST) {
+ functionId = TSDB_FUNC_STDDEV;
+ }
+
+ int32_t inter = 0;
+ getResultDataInfo(pSource->base.colType, pSource->base.colBytes, functionId, 0, &pse->resType,
+ &pse->resBytes, &inter, 0, false, NULL);
+ pse->colType = pse->resType;
+ pse->colBytes = pse->resBytes;
+
+ } else { // arithmetic expression
+ pse->colInfo.colId = pSource->base.colInfo.colId;
+ pse->colType = pSource->base.colType;
+ pse->colBytes = pSource->base.colBytes;
+ pse->resBytes = sizeof(double);
+ pse->resType = TSDB_DATA_TYPE_DOUBLE;
+
+ pse->functionId = pSource->base.functionId;
+ pse->numOfParams = pSource->base.numOfParams;
+
+ for (int32_t j = 0; j < pSource->base.numOfParams; ++j) {
+ tVariantAssign(&pse->param[j], &pSource->base.param[j]);
+ buildArithmeticExprFromMsg(px, NULL);
+ }
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
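+// build the expressions used by the global merge stage of a two-stage super table query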
+static int32_t createGlobalAggregateExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInfo) {
+ assert(tscIsTwoStageSTableQuery(pQueryInfo, 0));
+
+ pQueryAttr->numOfExpr3 = (int32_t) tscNumOfExprs(pQueryInfo);
+ pQueryAttr->pExpr3 = calloc(pQueryAttr->numOfExpr3, sizeof(SExprInfo));
+ if (pQueryAttr->pExpr3 == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ for (int32_t i = 0; i < pQueryAttr->numOfExpr3; ++i) {
+ SExprInfo* pExpr = &pQueryAttr->pExpr1[i];
+ SSqlExpr* pse = &pQueryAttr->pExpr3[i].base;
+
+ tscExprAssign(&pQueryAttr->pExpr3[i], pExpr);
+ pse->colInfo.colId = pExpr->base.resColId;
+ pse->colInfo.colIndex = i;
+
+ pse->colType = pExpr->base.resType;
+ pse->colBytes = pExpr->base.resBytes;
+ }
+
+ {
+ for (int32_t i = 0; i < pQueryAttr->numOfExpr3; ++i) {
+ SExprInfo* pExpr = &pQueryAttr->pExpr1[i];
+ SSqlExpr* pse = &pQueryAttr->pExpr3[i].base;
+
+ // the final result size and type are the same as for a query on a single table,
+ // so the flag is set to false here
+ int32_t inter = 0;
+
+ int32_t functionId = pExpr->base.functionId;
+ if (functionId >= TSDB_FUNC_TS && functionId <= TSDB_FUNC_DIFF) {
+ continue;
+ }
+
+ if (functionId == TSDB_FUNC_FIRST_DST) {
+ functionId = TSDB_FUNC_FIRST;
+ } else if (functionId == TSDB_FUNC_LAST_DST) {
+ functionId = TSDB_FUNC_LAST;
+ } else if (functionId == TSDB_FUNC_STDDEV_DST) {
+ functionId = TSDB_FUNC_STDDEV;
+ }
+
+ SUdfInfo* pUdfInfo = NULL;
+
+ if (functionId < 0) {
+ pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1);
+ }
+
+ getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, functionId, 0, &pse->resType, &pse->resBytes, &inter,
+ 0, false, pUdfInfo);
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
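+// collect the tag columns referenced by the query into the query attributes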
+static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo) {
+ if (pTableMetaInfo->tagColList == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pQueryAttr->numOfTags = (int16_t)taosArrayGetSize(pTableMetaInfo->tagColList);
+ if (pQueryAttr->numOfTags == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[0]->pTableMeta;
+
+ int32_t numOfTagColumns = tscGetNumOfTags(pTableMeta);
+
+ pQueryAttr->tagColList = calloc(pQueryAttr->numOfTags, sizeof(SColumnInfo));
+ if (pQueryAttr->tagColList == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ SSchema* pSchema = tscGetTableTagSchema(pTableMeta);
+ for (int32_t i = 0; i < pQueryAttr->numOfTags; ++i) {
+ SColumn* pCol = taosArrayGetP(pTableMetaInfo->tagColList, i);
+ SSchema* pColSchema = &pSchema[pCol->columnIndex];
+
+ if ((pCol->columnIndex >= numOfTagColumns || pCol->columnIndex < TSDB_TBNAME_COLUMN_INDEX) ||
+ (!isValidDataType(pColSchema->type))) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ SColumnInfo* pTagCol = &pQueryAttr->tagColList[i];
+
+ pTagCol->colId = pColSchema->colId;
+ pTagCol->bytes = pColSchema->bytes;
+ pTagCol->type = pColSchema->type;
+ pTagCol->flist.numOfFilters = 0;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList);
int32_t len = 0;
for(int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
- for (int32_t j = 0; j < pCol->numOfFilters; ++j) {
- SColumnFilterInfo *pColFilter = &pCol->filterInfo[j];
+ for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) {
len += sizeof(SColumnFilterInfo);
- if (pColFilter->filterstr) {
- len += (int32_t)pColFilter->len + 1;
+ if (pCol->info.flist.filterInfo[j].filterstr) {
+ len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE;
}
}
}
return len;
}
+int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo) {
+ // serialize tag column query condition
+ if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) {
+ STagCond* pTagCond = &pQueryInfo->tagCond;
+
+ STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
+ SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
+ if (pCond != NULL && pCond->cond != NULL) {
+ return pCond->len;
+ }
+ }
+ return 0;
+}
+
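+// convert the client-side SQueryInfo into the SQueryAttr structure consumed by the query engine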
+int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
+ memset(pQueryAttr, 0, sizeof(SQueryAttr));
+
+ int16_t numOfCols = (int16_t) taosArrayGetSize(pQueryInfo->colList);
+ int16_t numOfOutput = (int16_t) tscNumOfExprs(pQueryInfo);
+
+ pQueryAttr->topBotQuery = tscIsTopBotQuery(pQueryInfo);
+ pQueryAttr->hasTagResults = hasTagValOutput(pQueryInfo);
+ pQueryAttr->stabledev = isStabledev(pQueryInfo);
+ pQueryAttr->tsCompQuery = isTsCompQuery(pQueryInfo);
+ pQueryAttr->diffQuery = tscIsDiffDerivQuery(pQueryInfo);
+ pQueryAttr->simpleAgg = isSimpleAggregateRv(pQueryInfo);
+ pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
+ pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);
+ pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo);
+ pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
+ pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
+ pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
+ pQueryAttr->distinct = pQueryInfo->distinct;
+ pQueryAttr->sw = pQueryInfo->sessionWindow;
+ pQueryAttr->stateWindow = pQueryInfo->stateWindow;
+ pQueryAttr->multigroupResult = pQueryInfo->multigroupResult;
+
+ pQueryAttr->numOfCols = numOfCols;
+ pQueryAttr->numOfOutput = numOfOutput;
+ pQueryAttr->limit = pQueryInfo->limit;
+ pQueryAttr->slimit = pQueryInfo->slimit;
+ pQueryAttr->order = pQueryInfo->order;
+ pQueryAttr->fillType = pQueryInfo->fillType;
+ pQueryAttr->havingNum = pQueryInfo->havingFieldNum;
+ pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
+
+ if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor
+ pQueryAttr->window = pQueryInfo->window;
+ } else {
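+ // descending scan: swap the time window boundaries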
+ pQueryAttr->window.skey = pQueryInfo->window.ekey;
+ pQueryAttr->window.ekey = pQueryInfo->window.skey;
+ }
+
+ memcpy(&pQueryAttr->interval, &pQueryInfo->interval, sizeof(pQueryAttr->interval));
+
+ STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
+
+ if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
+ pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
+ *(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;
+ pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
+ } else {
+ assert(pQueryInfo->groupbyExpr.columnInfo == NULL);
+ }
+
+ pQueryAttr->pExpr1 = calloc(pQueryAttr->numOfOutput, sizeof(SExprInfo));
+ for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ tscExprAssign(&pQueryAttr->pExpr1[i], pExpr);
+
+ if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_ARITHM) {
+ for (int32_t j = 0; j < pQueryAttr->pExpr1[i].base.numOfParams; ++j) {
+ buildArithmeticExprFromMsg(&pQueryAttr->pExpr1[i], NULL);
+ }
+ }
+ }
+
+ pQueryAttr->tableCols = calloc(numOfCols, sizeof(SColumnInfo));
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
+ if (!isValidDataType(pCol->info.type) || pCol->info.type == TSDB_DATA_TYPE_NULL) {
+ assert(0);
+ }
+
+ pQueryAttr->tableCols[i] = pCol->info;
+ pQueryAttr->tableCols[i].flist.filterInfo = tFilterInfoDup(pCol->info.flist.filterInfo, pQueryAttr->tableCols[i].flist.numOfFilters);
+ }
+
+ // global aggregate query
+ if (pQueryAttr->stableQuery && (pQueryAttr->simpleAgg || pQueryAttr->interval.interval > 0) && tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
+ createGlobalAggregateExpr(pQueryAttr, pQueryInfo);
+ }
+
+ // for simple table, not for super table
+ if (pQueryInfo->arithmeticOnAgg) {
+ pQueryAttr->numOfExpr2 = (int32_t) taosArrayGetSize(pQueryInfo->exprList1);
+ pQueryAttr->pExpr2 = calloc(pQueryAttr->numOfExpr2, sizeof(SExprInfo));
+ for(int32_t i = 0; i < pQueryAttr->numOfExpr2; ++i) {
+ SExprInfo* p = taosArrayGetP(pQueryInfo->exprList1, i);
+ tscExprAssign(&pQueryAttr->pExpr2[i], p);
+ }
+ }
+
+ // tag column info
+ int32_t code = createTagColumnInfo(pQueryAttr, pQueryInfo, pTableMetaInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (pQueryAttr->fillType != TSDB_FILL_NONE) {
+ pQueryAttr->fillVal = calloc(pQueryInfo->numOfFillVal, sizeof(int64_t));
+ memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryInfo->numOfFillVal * sizeof(int64_t));
+ }
+
+ pQueryAttr->srcRowSize = 0;
+ pQueryAttr->maxTableColumnWidth = 0;
+ for (int16_t i = 0; i < numOfCols; ++i) {
+ pQueryAttr->srcRowSize += pQueryAttr->tableCols[i].bytes;
+ if (pQueryAttr->maxTableColumnWidth < pQueryAttr->tableCols[i].bytes) {
+ pQueryAttr->maxTableColumnWidth = pQueryAttr->tableCols[i].bytes;
+ }
+ }
+
+ pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr);
+
+ if (pQueryAttr->numOfCols <= 0 && !tscQueryTags(pQueryInfo) && !pQueryAttr->queryBlockDist) {
+ tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", addr,
+ (uint64_t)pQueryAttr->numOfCols, numOfCols);
+
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (pQueryAttr->interval.interval < 0) {
+ tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, addr,
+ (int64_t)pQueryInfo->interval.interval);
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ if (pQueryAttr->pGroupbyExpr != NULL && pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
+ tscError("%p illegal value of numOfGroupCols in query msg: %d", addr, pQueryInfo->groupbyExpr.numOfGroupCols);
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
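+// extract one table name from the comma-separated list, validate it, and append its fully qualified form to pNameArray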
+static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSqlObj* pSql) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SSqlCmd* pCmd = &pSql->cmd;
+
+ char tablename[TSDB_TABLE_FNAME_LEN] = {0};
+ int32_t len = 0;
+
+ if (nextStr == NULL) {
+ tstrncpy(tablename, *str, TSDB_TABLE_FNAME_LEN);
+ len = (int32_t) strlen(tablename);
+ } else {
+ len = (int32_t)(nextStr - (*str));
+ if (len >= TSDB_TABLE_NAME_LEN) {
+ sprintf(pCmd->payload, "table name too long");
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
+ memcpy(tablename, *str, nextStr - (*str));
+ tablename[len] = '\0';
+ }
+
+ (*str) = nextStr + 1;
+ len = (int32_t)strtrim(tablename);
+
+ SStrToken sToken = {.n = len, .type = TK_ID, .z = tablename};
+ tGetToken(tablename, &sToken.type);
+
+ // check whether the table name is valid
+ if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) {
+ sprintf(pCmd->payload, "table name is invalid");
+ return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ }
+
+ SName name = {0};
+ if ((code = tscSetTableFullName(&name, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ memset(tablename, 0, tListLen(tablename));
+ tNameExtractFullName(&name, tablename);
+
+ char* p = strdup(tablename);
+ taosArrayPush(pNameArray, &p);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t nameComparFn(const void* n1, const void* n2) {
+ int32_t ret = strcmp(*(char**)n1, *(char**)n2);
+ if (ret == 0) {
+ return 0;
+ } else {
+ return ret > 0? 1:-1;
+ }
+}
+
+static void freeContent(void* p) {
+ char* ptr = *(char**)p;
+ tfree(ptr);
+}
+
+int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray) {
+ SSqlCmd *pCmd = &pSql->cmd;
+
+ pCmd->command = TSDB_SQL_MULTI_META;
+ pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META;
+
+ int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ char *str = (char *)pNameList;
+
+ SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd);
+ if (pQueryInfo == NULL) {
+ pSql->res.code = terrno;
+ return terrno;
+ }
+
+ char *nextStr;
+ while (1) {
+ nextStr = strchr(str, ',');
+ if (nextStr == NULL) {
+ code = doAddTableName(nextStr, &str, pNameArray, pSql);
+ break;
+ }
+
+ code = doAddTableName(nextStr, &str, pNameArray, pSql);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (taosArrayGetSize(pNameArray) > TSDB_MULTI_TABLEMETA_MAX_NUM) {
+ code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ sprintf(pCmd->payload, "tables over the max number");
+ return code;
+ }
+ }
+
+ size_t len = taosArrayGetSize(pNameArray);
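+ // a single table name needs no sorting or deduplication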
+ if (len == 1) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (len > TSDB_MULTI_TABLEMETA_MAX_NUM) {
+ code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ sprintf(pCmd->payload, "tables over the max number");
+ return code;
+ }
+
+ taosArraySort(pNameArray, nameComparFn);
+ taosArrayRemoveDuplicate(pNameArray, nameComparFn, freeContent);
+ return TSDB_CODE_SUCCESS;
+}
+
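+// check whether an existing vgroup entry matches the incoming vgroup message, endpoint by endpoint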
+bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
+ assert(pExisted != NULL && src != NULL);
+ if (pExisted->numOfEps != src->numOfEps) {
+ return false;
+ }
+
+ for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
+ if (pExisted->ep[i].port != src->epAddr[i].port) {
+ return false;
+ }
+
+ if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
+ assert(pVgroupMsg != NULL);
+
+ SNewVgroupInfo info = {0};
+ info.numOfEps = pVgroupMsg->numOfEps;
+ info.vgId = pVgroupMsg->vgId;
+ info.inUse = 0; // 0 is the default value of inUse in case of multiple replicas
+
+ assert(info.numOfEps >= 1 && info.vgId >= 1);
+ for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
+ tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
+ info.ep[i].port = pVgroupMsg->epAddr[i].port;
+ }
+
+ return info;
+}
+
+void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
+ char fname[TSDB_TABLE_FNAME_LEN] = {0};
+ tNameExtractFullName(&pTableMetaInfo->name, fname);
+
+ int32_t len = (int32_t) strnlen(fname, TSDB_TABLE_FNAME_LEN);
+ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
+ void* pv = taosCacheAcquireByKey(tscVgroupListBuf, fname, len);
+ if (pv != NULL) {
+ taosCacheRelease(tscVgroupListBuf, &pv, true);
+ }
+ }
+
+ taosHashRemove(tscTableMetaMap, fname, len);
+ tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
+}
+
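+// return a heap-allocated copy of the current database name; HTTP requests take it from the HttpContext, under the connection mutex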
+char* cloneCurrentDBName(SSqlObj* pSql) {
+ char *p = NULL;
+ HttpContext *pCtx = NULL;
+
+ pthread_mutex_lock(&pSql->pTscObj->mutex);
+ STscObj *pTscObj = pSql->pTscObj;
+ switch (pTscObj->from) {
+ case TAOS_REQ_FROM_HTTP:
+ pCtx = pSql->param;
+ if (pCtx && pCtx->db[0] != '\0') {
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
+ int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
+ assert(len <= sizeof(db));
+
+ p = strdup(db);
+ }
+ break;
+ default:
+ break;
+ }
+ if (p == NULL) {
+ p = strdup(pSql->pTscObj->db);
+ }
+ pthread_mutex_unlock(&pSql->pTscObj->mutex);
+
+ return p;
+}
diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt
index fb8cbbe31b358a8fbde3b90cd84fe6158cca6239..24bfb44ac90e11e01ba99423aa68bd5a9511f746 100644
--- a/src/client/tests/CMakeLists.txt
+++ b/src/client/tests/CMakeLists.txt
@@ -8,7 +8,12 @@ FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib6
IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
MESSAGE(STATUS "gTest library found, build unit test")
- INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR})
+ # GoogleTest requires at least C++11
+ SET(CMAKE_CXX_STANDARD 11)
+
+ INCLUDE_DIRECTORIES(/usr/include /usr/local/include)
+ LINK_DIRECTORIES(/usr/lib /usr/local/lib)
+
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(cliTest ${SOURCE_LIST})
diff --git a/src/client/tests/cliTest.cpp b/src/client/tests/cliTest.cpp
index 30f248b5418b54b1be26dfcf15348b03fd70af4d..c2799c7ff6cf75017fc4aacfca04caddf6189389 100644
--- a/src/client/tests/cliTest.cpp
+++ b/src/client/tests/cliTest.cpp
@@ -1,5 +1,6 @@
#include <gtest/gtest.h>
#include <iostream>
+#include <inttypes.h>
#include "taos.h"
#include "tglobal.h"
@@ -132,7 +133,7 @@ void validateResultFields() {
taos_free_result(res);
char sql[512] = {0};
- sprintf(sql, "insert into t1 values(%ld, 99, 'abc', 'test')", start_ts);
+ sprintf(sql, "insert into t1 values(%" PRId64 ", 99, 'abc', 'test')", start_ts);
res = taos_query(conn, sql);
ASSERT_EQ(taos_errno(res), 0);
diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp
index 3cc6d541e002a9167b5e2668d4914ad1aa6f94f0..f6de5d46a7c569ae5abe38f7e9cd2b6dc56a5586 100644
--- a/src/client/tests/timeParseTest.cpp
+++ b/src/client/tests/timeParseTest.cpp
@@ -1,8 +1,10 @@
-#include "os.h"
+
#include <gtest/gtest.h>
#include <iostream>
#include <sys/time.h>
+#include <inttypes.h>
+#include "os.h"
#include "taos.h"
#include "ttoken.h"
#include "tutil.h"
@@ -15,10 +17,10 @@ int main(int argc, char** argv) {
extern void deltaToUtcInitOnce();
/* test parse time function */
TEST(testCase, parse_time) {
-
+
taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
deltaToUtcInitOnce();
-
+
char t1[] = "2018-1-1 1:1:1.952798";
char t13[] = "1970-1-1 0:0:0";
@@ -77,15 +79,15 @@ TEST(testCase, parse_time) {
taosParseTime(t12, &time1, strlen(t12), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, time1);
- taos_options(TSDB_OPTION_TIMEZONE, "UTC");
+ taos_options(TSDB_OPTION_TIMEZONE, "UTC");
deltaToUtcInitOnce();
-
+
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 0);
taos_options(TSDB_OPTION_TIMEZONE, "Asia/Shanghai");
deltaToUtcInitOnce();
-
+
char t14[] = "1970-1-1T0:0:0Z";
taosParseTime(t14, &time, strlen(t14), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 0);
@@ -135,7 +137,7 @@ TEST(testCase, parse_time) {
//======================== add some case ============================//
-
+
char b1[] = "9999-12-31 23:59:59.999";
taosParseTime(b1, &time, strlen(b1), TSDB_TIME_PRECISION_MILLI,0);
EXPECT_EQ(time, 253402271999999);
@@ -145,27 +147,27 @@ TEST(testCase, parse_time) {
taosParseTime(b2, &time, strlen(b2), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 1577811661321);
- taos_options(TSDB_OPTION_TIMEZONE, "America/New_York");
+ taos_options(TSDB_OPTION_TIMEZONE, "America/New_York");
deltaToUtcInitOnce();
-
+
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 18000 * MILLISECOND_PER_SECOND);
- taos_options(TSDB_OPTION_TIMEZONE, "Asia/Tokyo");
+ taos_options(TSDB_OPTION_TIMEZONE, "Asia/Tokyo");
deltaToUtcInitOnce();
-
+
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -32400 * MILLISECOND_PER_SECOND);
taos_options(TSDB_OPTION_TIMEZONE, "Asia/Shanghai");
deltaToUtcInitOnce();
-
+
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
- printf("%ld\n", time);
+ printf("%" PRId64 "\n", time);
}
diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h
index be16e80124358012a079ada5f7cd689afa0f7b75..918763ebb4b92399872f10c0dd632689eaa08d1b 100644
--- a/src/common/inc/tcmdtype.h
+++ b/src/common/inc/tcmdtype.h
@@ -41,8 +41,10 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_TABLE, "create-table" )
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_FUNCTION, "create-function" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_DB, "drop-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_TABLE, "drop-table" )
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_FUNCTION, "drop-function" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_ACCT, "create-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_USER, "create-user" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_ACCT, "drop-acct" )
@@ -51,6 +53,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" )
+
TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" )
@@ -63,6 +66,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_QUERY, "kill-query" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_STREAM, "kill-stream" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_KILL_CONNECTION, "kill-connection" )
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_COMPACT_VNODE, "compact-vnode" )
// SQL below is for read operation
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_READ, "read" )
@@ -72,20 +76,19 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_STABLEVGROUP, "stable-vgroup" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MULTI_META, "multi-meta" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_HB, "heart-beat" )
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_FUNC, "retrieve-function" )
// SQL below for client local
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_LOCAL, "local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DESCRIBE_TABLE, "describe-table" )
- TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_LOCALMERGE, "retrieve-localmerge" )
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_GLOBALMERGE, "retrieve-globalmerge" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
+ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_STABLE, "show-create-stable")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
- /*
- * build empty result instead of accessing dnode to fetch result
- * reset the client cache
- */
+ // build an empty result instead of accessing the dnode to fetch it; reset the client cache
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 2268f133d654e3486581cf2ec44021e8e38618a8..a01c3775397e25849d9e8ff70409db7ac0af90ba 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -15,10 +15,7 @@
#ifndef _TD_DATA_FORMAT_H_
#define _TD_DATA_FORMAT_H_
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
+#include "os.h"
#include "talgo.h"
#include "ttype.h"
#include "tutil.h"
@@ -34,6 +31,14 @@ extern "C" {
memcpy(varDataVal(x), (str), __len); \
} while (0);
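+// same as STR_TO_VARSTR, except that the length prefix is stored in network byte order (htons)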
+#define STR_TO_NET_VARSTR(x, str) \
+ do { \
+ VarDataLenT __len = (VarDataLenT)strlen(str); \
+ *(VarDataLenT *)(x) = htons(__len); \
+ memcpy(varDataVal(x), (str), __len); \
+ } while (0);
+
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \
@@ -48,10 +53,10 @@ extern "C" {
// ----------------- TSDB COLUMN DEFINITION
typedef struct {
- int8_t type; // Column type
- int16_t colId; // column ID
- int16_t bytes; // column bytes
- int16_t offset; // point offset in SDataRow after the header part
+ int8_t type; // Column type
+ int16_t colId; // column ID
+ int16_t bytes; // column bytes (restore to int16_t in case of misuse)
+ uint16_t offset; // point offset in SDataRow after the header part.
} STColumn;
#define colType(col) ((col)->type)
@@ -162,10 +167,11 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
return 0;
}
}
+
// ----------------- Data row structure
/* A data row, the format is like below:
- * |<--------------------+--------------------------- len ---------------------------------->|
+ * |<------------------------------------------------ len ---------------------------------->|
* |<-- Head -->|<--------- flen -------------->| |
* +---------------------+---------------------------------+---------------------------------+
* | uint16_t | int16_t | | |
@@ -179,8 +185,9 @@ typedef void *SDataRow;
#define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
-#define dataRowLen(r) (*(uint16_t *)(r))
-#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
+#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535
+#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r))
+#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
#define dataRowKey(r) tdGetKey(dataRowTKey(r))
@@ -195,21 +202,24 @@ void tdFreeDataRow(SDataRow row);
void tdInitDataRow(SDataRow row, STSchema *pSchema);
SDataRow tdDataRowDup(SDataRow row);
+
// the offset here does not include the dataRow header length
-static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) {
+static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type,
+ int32_t offset) {
ASSERT(value != NULL);
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
- char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row));
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
- memcpy(ptr, value, varDataTLen(value));
+ if (isCopyVarData) {
+ memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value));
+ }
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
- memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]);
+ memcpy(POINTER_SHIFT(row, toffset), (const void *)(&tvalue), TYPE_BYTES[type]);
} else {
memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
}
@@ -218,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i
return 0;
}
+
+// the offset here does not include the dataRow header length
+static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) {
+ return tdAppendDataColVal(row, value, true, type, offset);
+}
+
// NOTE: the offset here includes the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
if (IS_VAR_DATA_TYPE(type)) {
@@ -227,6 +243,83 @@ static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t o
}
}
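+// raw pointer to the idx-th column slot inside the row tuple (no var-data dereference)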
+static FORCE_INLINE void *tdGetPtrToCol(SDataRow row, STSchema *pSchema, int idx) {
+ return POINTER_SHIFT(row, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset);
+}
+
+static FORCE_INLINE void *tdGetColOfRowBySchema(SDataRow row, STSchema *pSchema, int idx) {
+ int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
+ int8_t type = pSchema->columns[idx].type;
+
+ return tdGetRowDataOfCol(row, type, offset);
+}
+
+static FORCE_INLINE bool tdIsColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) {
+ int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
+ int8_t type = pSchema->columns[idx].type;
+
+ return isNull(tdGetRowDataOfCol(row, type, offset), type);
+}
+
+static FORCE_INLINE void tdSetColOfRowNullBySchema(SDataRow row, STSchema *pSchema, int idx) {
+ int16_t offset = TD_DATA_ROW_HEAD_SIZE + pSchema->columns[idx].offset;
+ int8_t type = pSchema->columns[idx].type;
+ int16_t bytes = pSchema->columns[idx].bytes;
+
+ setNull(tdGetRowDataOfCol(row, type, offset), type, bytes);
+}
+
+static FORCE_INLINE void tdCopyColOfRowBySchema(SDataRow dst, STSchema *pDstSchema, int dstIdx, SDataRow src, STSchema *pSrcSchema, int srcIdx) {
+ int8_t type = pDstSchema->columns[dstIdx].type;
+ ASSERT(type == pSrcSchema->columns[srcIdx].type);
+ void *pData = tdGetPtrToCol(dst, pDstSchema, dstIdx);
+ void *value = tdGetPtrToCol(src, pSrcSchema, srcIdx);
+
+ switch (type) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ *(VarDataOffsetT *)pData = *(VarDataOffsetT *)value;
+ pData = POINTER_SHIFT(dst, *(VarDataOffsetT *)pData);
+ value = POINTER_SHIFT(src, *(VarDataOffsetT *)value);
+ memcpy(pData, value, varDataTLen(value));
+ break;
+ case TSDB_DATA_TYPE_NULL:
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ *(uint8_t *)pData = *(uint8_t *)value;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ *(uint16_t *)pData = *(uint16_t *)value;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_UINT:
+ *(uint32_t *)pData = *(uint32_t *)value;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ *(uint64_t *)pData = *(uint64_t *)value;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ SET_FLOAT_PTR(pData, value);
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ SET_DOUBLE_PTR(pData, value);
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ *(TSKEY *)pData = tdGetKey(*(TKEY *)value);
+ } else {
+ *(TSKEY *)pData = *(TSKEY *)value;
+ }
+ break;
+ default:
+ memcpy(pData, value, pSrcSchema->columns[srcIdx].bytes);
+ }
+}
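+
+// Usage sketch (illustrative, hypothetical helper): copy every column between
+// two rows that share column types index-by-index; assumes dst was allocated
+// with at least dataRowLen(src) bytes so the var-data payloads fit.
+static FORCE_INLINE void tdExampleCopyRow(SDataRow dst, SDataRow src, STSchema *pSchema) {
+  for (int i = 0; i < schemaNCols(pSchema); ++i) {
+    tdCopyColOfRowBySchema(dst, pSchema, i, src, pSchema, i);
+  }
+}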
+
+
// ----------------- Data column structure
typedef struct SDataCol {
int8_t type; // column type
@@ -237,19 +330,25 @@ typedef struct SDataCol {
int len; // column data length
VarDataOffsetT *dataOff; // For binary and nchar data, the offset in the data column
void * pData; // Actual data pointer
+ TSKEY ts; // only used in last NULL column
} SDataCol;
+#define isAllRowsNull(pCol) ((pCol)->len == 0)
static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
-void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
-void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints);
+int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
+
+void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
+int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
-void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
// Get the data pointer from a column-wised data
-static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) {
+static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) {
+ if (isAllRowsNull(pCol)) {
+ return getNullValue(pCol->type);
+ }
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
@@ -268,20 +367,16 @@ static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) {
}
typedef struct {
- int maxRowSize;
- int maxCols; // max number of columns
- int maxPoints; // max number of points
- int bufSize;
-
- int numOfRows;
- int numOfCols; // Total number of cols
- int sversion; // TODO: set sversion
- void * buf;
+ int maxCols; // max number of columns
+ int maxPoints; // max number of points
+ int numOfRows;
+ int numOfCols; // Total number of cols
+ int sversion; // TODO: set sversion
SDataCol *cols;
} SDataCols;
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
-#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
+#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
@@ -291,6 +386,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
}
}
+static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) {
+ ASSERT(row < pCols->numOfRows);
+ return dataColsKeyAt(pCols, row);
+}
+
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);
@@ -315,18 +415,18 @@ static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
}
}
-SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
+SDataCols *tdNewDataCols(int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
SDataCols *tdFreeDataCols(SDataCols *pCols);
-void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
-int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull);
// ----------------- K-V data row structure
-/*
+/* |<-------------------------------------- len -------------------------------------------->|
+ * |<----- header ----->|<--------------------------- body -------------------------------->|
* +----------+----------+---------------------------------+---------------------------------+
- * | int16_t | int16_t | | |
+ * | uint16_t | int16_t | | |
* +----------+----------+---------------------------------+---------------------------------+
* | len | ncols | cols index | data part |
* +----------+----------+---------------------------------+---------------------------------+
@@ -334,14 +434,14 @@ int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
typedef void *SKVRow;
typedef struct {
- int16_t colId;
- int16_t offset;
+ int16_t colId;
+ uint16_t offset;
} SColIdx;
-#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t))
+#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t))
-#define kvRowLen(r) (*(int16_t *)(r))
-#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
+#define kvRowLen(r) (*(TDRowLenT *)(r))
+#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t)))
#define kvRowSetLen(r, len) kvRowLen(r) = (len)
#define kvRowSetNCols(r, n) kvRowNCols(r) = (n)
#define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE)
@@ -352,6 +452,9 @@ typedef struct {
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r))
+#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r)))
+#define kvRowKey(r) tdGetKey(kvRowTKey(r))
+#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
SKVRow tdKVRowDup(SKVRow row);
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
@@ -375,13 +478,64 @@ static FORCE_INLINE void *tdGetKVRowValOfCol(SKVRow row, int16_t colId) {
return kvRowColVal(row, (SColIdx *)ret);
}
+static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) {
+ return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ);
+}
+
+// the offset here does not include the kvRow header length
+static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type,
+ int32_t offset) {
+ ASSERT(value != NULL);
+ int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE;
+ SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset);
+ char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row));
+
+ pColIdx->colId = colId;
+ pColIdx->offset = kvRowLen(row); // the value offset is measured from the row head, so it includes TD_KV_ROW_HEAD_SIZE
+
+ if (IS_VAR_DATA_TYPE(type)) {
+ if (isCopyValData) {
+ memcpy(ptr, value, varDataTLen(value));
+ }
+ kvRowLen(row) += varDataTLen(value);
+ } else {
+ if (offset == 0) {
+ ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
+ TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
+ memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]);
+ } else {
+ memcpy(ptr, value, TYPE_BYTES[type]);
+ }
+ kvRowLen(row) += TYPE_BYTES[type];
+ }
+
+ return 0;
+}
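+
+// Usage sketch (illustrative, hypothetical helper): the first appended column
+// uses index-slot offset 0 and must be the timestamp, mirroring tdAppendDataColVal.
+// It assumes kvRowSetLen/kvRowSetNCols were called first, so the index area is
+// reserved and the value bytes accumulate after it at the row tail.
+static FORCE_INLINE void tdExampleAppendKvTs(SKVRow row, TSKEY ts) {
+  tdAppendKvColVal(row, &ts, true, PRIMARYKEY_TIMESTAMP_COL_INDEX, TSDB_DATA_TYPE_TIMESTAMP, 0);
+}
+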
+// NOTE: the offset here includes the header size
+static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); }
+
+static FORCE_INLINE void *tdGetKVRowValOfColEx(SKVRow row, int16_t colId, int32_t *nIdx) {
+ while (*nIdx < kvRowNCols(row)) {
+ SColIdx *pColIdx = kvRowColIdxAt(row, *nIdx);
+ if (pColIdx->colId == colId) {
+ ++(*nIdx);
+ return tdGetKvRowDataOfCol(row, pColIdx->offset);
+ } else if (pColIdx->colId > colId) {
+ return NULL;
+ } else {
+ ++(*nIdx);
+ }
+ }
+ return NULL;
+}
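+
+// Scan sketch (illustrative, hypothetical helper): one monotone cursor serves
+// a whole row scan, so looking up all columns in ascending colId order costs
+// a single pass over the KV index instead of one search per column.
+static FORCE_INLINE void tdExampleScanKvRow(SKVRow row, STSchema *pSchema) {
+  int32_t nIdx = 0;
+  for (int i = 0; i < schemaNCols(pSchema); ++i) {
+    void *val = tdGetKVRowValOfColEx(row, pSchema->columns[i].colId, &nIdx);
+    (void)val;  // NULL means this colId is absent from the KV row
+  }
+}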
+
// ----------------- K-V data row builder
typedef struct {
int16_t tCols;
int16_t nCols;
SColIdx *pColIdx;
- int16_t alloc;
- int16_t size;
+ uint16_t alloc;
+ uint16_t size;
void * buf;
} SKVRowBuilder;
@@ -393,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
- if (pBuilder->pColIdx == NULL) return -1;
+ SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
+ if (pColIdx == NULL) return -1;
+ pBuilder->pColIdx = pColIdx;
}
pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@@ -407,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2;
}
- pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
- if (pBuilder->buf == NULL) return -1;
+ void* buf = realloc(pBuilder->buf, pBuilder->alloc);
+ if (buf == NULL) return -1;
+ pBuilder->buf = buf;
}
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);
@@ -417,6 +573,242 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
return 0;
}
+// ----------------- SMemRow appended with sequential data row structure
+/*
+ * |---------|------------------------------------------------- len ---------------------------------->|
+ * |<-------- Head ------>|<--------- flen -------------->| |
+ * |---------+---------------------+---------------------------------+---------------------------------+
+ * | uint8_t | uint16_t | int16_t | | |
+ * |---------+----------+----------+---------------------------------+---------------------------------+
+ * | flag | len | sversion | First part | Second part |
+ * +---------+----------+----------+---------------------------------+---------------------------------+
+ *
+ * NOTE: timestamp in this row structure is TKEY instead of TSKEY
+ */
+
+// ----------------- SMemRow appended with extended K-V data row structure
+/* |--------------------|------------------------------------------------ len ---------------------------------->|
+ * |<------------- Head ------------>|<--------- flen -------------->| |
+ * |--------------------+----------+--------------------------------------------+---------------------------------+
+ * | uint8_t | int16_t | uint16_t | int16_t | | |
+ * |---------+----------+----------+----------+---------------------------------+---------------------------------+
+ * | flag | sversion | len | ncols | cols index | data part |
+ * |---------+----------+----------+----------+---------------------------------+---------------------------------+
+ */
+
+typedef void *SMemRow;
+
+#define TD_MEM_ROW_TYPE_SIZE sizeof(uint8_t)
+#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t)
+#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE)
+#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE)
+#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE)
+
+#define SMEM_ROW_DATA 0x0U // SDataRow
+#define SMEM_ROW_KV 0x01U // SKVRow
+#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag
+
+#define KVRatioKV (0.2f) // all bool
+#define KVRatioPredict (0.4f)
+#define KVRatioData (0.75f) // all bigint
+#define KVRatioConvert (0.9f)
+
+#define memRowType(r) ((*(uint8_t *)(r)) & 0x01)
+
+#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // overwrite the whole flag byte to guard against dirty memory
+#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit
+#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
+#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
+#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
+#define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
+#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT)
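+
+// Dispatch sketch (illustrative, hypothetical helper): the lowest bit selects
+// the physical layout while the highest bit is an independent convert hint.
+static FORCE_INLINE const char *tdExampleRowKind(SMemRow row) {
+  if (isDataRow(row)) return isNeedConvertRow(row) ? "data(convert)" : "data";
+  return isNeedConvertRow(row) ? "kv(convert)" : "kv";
+}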
+
+#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag
+#define memRowKvBody(r) \
+ POINTER_SHIFT(r, TD_MEM_ROW_KV_TYPE_VER_SIZE) // section after flag + sversion as to reuse SKVRow
+
+#define memRowDataLen(r) (*(TDRowLenT *)memRowDataBody(r)) // 0~65535
+#define memRowKvLen(r) (*(TDRowLenT *)memRowKvBody(r)) // 0~65535
+
+#define memRowDataTLen(r) \
+ ((TDRowTLenT)(memRowDataLen(r) + TD_MEM_ROW_TYPE_SIZE)) // using uint32_t/int32_t to store the TLen
+
+#define memRowKvTLen(r) ((TDRowTLenT)(memRowKvLen(r) + TD_MEM_ROW_KV_TYPE_VER_SIZE))
+
+#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r))
+#define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen
+
+static FORCE_INLINE char *memRowEnd(SMemRow row) {
+ if (isDataRow(row)) {
+ return (char *)dataRowEnd(memRowDataBody(row));
+ } else {
+ return (char *)kvRowEnd(memRowKvBody(row));
+ }
+}
+
+#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r))
+#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
+#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
+#define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v))
+#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r)))
+
+#define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r)))
+#define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r)))
+#define memRowSetTKey(r, k) \
+ do { \
+ if (isDataRow(r)) { \
+ dataRowTKey(memRowDataBody(r)) = (k); \
+ } else { \
+ kvRowTKey(memRowKvBody(r)) = (k); \
+ } \
+ } while (0)
+
+#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l))
+#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v))
+#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r))
+#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE)
+#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
+
+SMemRow tdMemRowDup(SMemRow row);
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull);
+
+// NOTE: the offset here includes the header size
+static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
+ if (isDataRow(row)) {
+ return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
+ } else {
+ return tdGetKVRowValOfCol(memRowKvBody(row), colId);
+ }
+}
+
+/**
+ * NOTE:
+ * 1. Applicable when scanning columns one by one in ascending colId order
+ * 2. the offset here includes the header size
+ */
+static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_t colType, int32_t offset,
+ int32_t *kvNIdx) {
+ if (isDataRow(row)) {
+ return tdGetRowDataOfCol(memRowDataBody(row), colType, offset);
+ } else {
+ return tdGetKVRowValOfColEx(memRowKvBody(row), colId, kvNIdx);
+ }
+}
+
+static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId,
+ int8_t type, int32_t offset) {
+ if (isDataRow(row)) {
+ tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset);
+ } else {
+ tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset);
+ }
+ return 0;
+}
+
+// make sure the schema->flen fixed-length area has already been appended for SDataRow
+static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value, int8_t colType) {
+ int32_t len = 0;
+ if (IS_VAR_DATA_TYPE(colType)) {
+ len += varDataTLen(value);
+ if (rowType == SMEM_ROW_KV) {
+ len += sizeof(SColIdx);
+ }
+ } else {
+ if (rowType == SMEM_ROW_KV) {
+ len += TYPE_BYTES[colType];
+ len += sizeof(SColIdx);
+ }
+ }
+ return len;
+}
+
+/**
+ * 1. calculate the delta of AllNullLen for SDataRow.
+ * 2. calculate the real len for SKVRow.
+ */
+static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) {
+ switch (colType) {
+ case TSDB_DATA_TYPE_BINARY: {
+ int32_t varLen = varDataLen(value);
+ *dataLen += (varLen - CHAR_BYTES);
+ *kvLen += (varLen + sizeof(SColIdx));
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR: {
+ int32_t varLen = varDataLen(value);
+ *dataLen += (varLen - TSDB_NCHAR_SIZE);
+ *kvLen += (varLen + sizeof(SColIdx));
+ break;
+ }
+ default: {
+ *kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx));
+ break;
+ }
+ }
+}
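+
+// Worked example (illustrative, hypothetical helper): for a BINARY cell with a
+// 3-byte payload, varDataLen() is 3, so the SDataRow grows by 3 - CHAR_BYTES
+// over its all-NULL footprint while the SKVRow pays 3 + sizeof(SColIdx).
+static FORCE_INLINE void tdExampleDeltaLen(const void *binVal, int32_t *dataLen, int32_t *kvLen) {
+  *dataLen = 0;
+  *kvLen = 0;
+  tdGetColAppendDeltaLen(binVal, TSDB_DATA_TYPE_BINARY, dataLen, kvLen);
+}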
+
+typedef struct {
+ int16_t colId;
+ uint8_t colType;
+ char* colVal;
+} SColInfo;
+
+static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t colType, char* colVal) {
+ colInfo->colId = colId;
+ colInfo->colType = colType;
+ colInfo->colVal = colVal;
+}
+
+SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2);
+
+#if 0
+// ----------------- Raw payload structure for row:
+/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->|
+ * | |<----------------- flen ------------->|<--- value part --->|
+ * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... |
+ * +-----------+----------+----------+--------------------------------------|--------------------|
+ * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... |
+ * +-----------+----------+----------+--------------------------------------+--------------------|
+ * 1. the offset in the column data tuple is measured from the value part, to avoid uint16_t overflow.
+ * 2. dataTLen: total length including the header and body.
+ */
+
+#define PAYLOAD_NCOLS_LEN sizeof(uint16_t)
+#define PAYLOAD_NCOLS_OFFSET (sizeof(uint8_t) + sizeof(TDRowTLenT))
+#define PAYLOAD_HEADER_LEN (PAYLOAD_NCOLS_OFFSET + PAYLOAD_NCOLS_LEN)
+#define PAYLOAD_ID_LEN sizeof(int16_t)
+#define PAYLOAD_ID_TYPE_LEN (sizeof(int16_t) + sizeof(uint8_t))
+#define PAYLOAD_COL_HEAD_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(uint16_t))
+#define PAYLOAD_PRIMARY_COL_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(TSKEY))
+
+#define payloadBody(r) POINTER_SHIFT(r, PAYLOAD_HEADER_LEN)
+#define payloadType(r) (*(uint8_t *)(r))
+#define payloadSetType(r, t) (payloadType(r) = (t))
+#define payloadTLen(r) (*(TDRowTLenT *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) // including total header
+#define payloadSetTLen(r, l) (payloadTLen(r) = (l))
+#define payloadNCols(r) (*(TDRowLenT *)POINTER_SHIFT(r, PAYLOAD_NCOLS_OFFSET))
+#define payloadSetNCols(r, n) (payloadNCols(r) = (n))
+#define payloadValuesOffset(r) \
+ (PAYLOAD_HEADER_LEN + payloadNCols(r) * PAYLOAD_COL_HEAD_LEN) // avoid using the macro in loop
+#define payloadValues(r) POINTER_SHIFT(r, payloadValuesOffset(r)) // avoid using the macro in loop
+#define payloadColId(c) (*(int16_t *)(c))
+#define payloadColType(c) (*(uint8_t *)POINTER_SHIFT(c, PAYLOAD_ID_LEN))
+#define payloadColOffset(c) (*(uint16_t *)POINTER_SHIFT(c, PAYLOAD_ID_TYPE_LEN))
+#define payloadColValue(c) POINTER_SHIFT(c, payloadColOffset(c))
+
+#define payloadColSetId(c, i) (payloadColId(c) = (i))
+#define payloadColSetType(c, t) (payloadColType(c) = (t))
+#define payloadColSetOffset(c, o) (payloadColOffset(c) = (o))
+
+#define payloadTSKey(r) (*(TSKEY *)POINTER_SHIFT(r, payloadValuesOffset(r)))
+#define payloadTKey(r) (*(TKEY *)POINTER_SHIFT(r, payloadValuesOffset(r)))
+#define payloadKey(r) tdGetKey(payloadTKey(r))
+
+
+static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); }
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h
index 9addea412b777e18390e7925f02dd2e09ad17a9b..2e49a69366c2277c98ec32a1d8419c141ddecc0f 100644
--- a/src/common/inc/texpr.h
+++ b/src/common/inc/texpr.h
@@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef TDENGINE_TAST_H
-#define TDENGINE_TAST_H
+#ifndef TDENGINE_TEXPR_H
+#define TDENGINE_TEXPR_H
#ifdef __cplusplus
extern "C" {
@@ -62,38 +62,42 @@ typedef struct tExprNode {
uint8_t nodeType;
union {
struct {
- uint8_t optr; // filter operator
- uint8_t hasPK; // 0: do not contain primary filter, 1: contain
- void * info; // support filter operation on this expression only available for leaf node
-
+ uint8_t optr; // filter operator
+ uint8_t hasPK; // 0: do not contain primary filter, 1: contain
+ void *info; // support filter operation on this expression only available for leaf node
struct tExprNode *pLeft; // left child pointer
struct tExprNode *pRight; // right child pointer
} _node;
- struct SSchema *pSchema;
- tVariant * pVal;
+
+ struct SSchema *pSchema;
+ tVariant *pVal;
};
} tExprNode;
typedef struct SExprTraverseSupp {
__result_filter_fn_t nodeFilterFn;
__do_filter_suppl_fn_t setupInfoFn;
- void * pExtInfo;
+ void *pExtInfo;
} SExprTraverseSupp;
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
+void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
+tExprNode* exprdup(tExprNode* pTree);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
-bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
+bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
+void buildFilterSetFromBinary(void **q, const char *buf, int32_t len);
+
#ifdef __cplusplus
}
#endif
-#endif // TDENGINE_TAST_H
+#endif // TDENGINE_TEXPR_H
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 07d614d5ef26aca4ef083c3c73695fc44071ffa7..a384cf6e70485b8f7d1b06b7f7e86ba92776b547 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -45,6 +45,7 @@ extern int32_t tsDnodeId;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
+extern int tsRpcForceTcp; // all commands use the TCP protocol if this is enabled
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
@@ -58,6 +59,7 @@ extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
+extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
//query buffer management
@@ -69,6 +71,7 @@ extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsMaxSQLStringLen;
+extern int32_t tsMaxWildCardsLen;
extern int8_t tsTscEnableRecordSql;
extern int32_t tsMaxNumOfOrderedResults;
extern int32_t tsMinSlidingTime;
@@ -122,6 +125,7 @@ extern int32_t tsHttpMaxThreads;
extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum;
+extern int8_t tsHttpDbNameMandatory;
// mqtt
extern int8_t tsEnableMqttModule;
@@ -142,16 +146,20 @@ extern int32_t tsMonitorInterval;
extern int8_t tsEnableStream;
// internal
+extern int8_t tsCompactMnodeWal;
extern int8_t tsPrintAuth;
extern int8_t tscEmbedded;
extern char configDir[];
extern char tsVnodeDir[];
extern char tsDnodeDir[];
extern char tsMnodeDir[];
+extern char tsMnodeBakDir[];
+extern char tsMnodeTmpDir[];
extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
-extern int64_t tsMsPerDay[3];
+extern int64_t tsTickPerDay[3];
+extern int32_t tsTopicBianryLen;
// system info
extern char tsOsName[];
@@ -201,6 +209,16 @@ extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;
+#ifdef TD_TSZ
+// lossy
+extern char lossyColumns[];
+extern double fPrecision;
+extern double dPrecision;
+extern uint32_t maxRange;
+extern uint32_t curRange;
+extern char Compressor[];
+#endif
+
typedef struct {
char dir[TSDB_FILENAME_LEN];
int level;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index cacd6d2ae7c2200f0f01d7adea81c8d2510ae9dc..b29a535ec2c80f7fb058e3d1c55e5d16ed71c3c4 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -41,6 +41,35 @@ typedef struct SResPair {
double avg;
} SResPair;
+// the structure for sql function in select clause
+typedef struct SSqlExpr {
+ char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
+ char token[TSDB_COL_NAME_LEN]; // original token
+ SColIndex colInfo;
+ uint64_t uid; // table uid, todo refactor use the pointer
+
+ int16_t functionId; // function id in aAgg array
+
+ int16_t resType; // return value type
+ int16_t resBytes; // length of return value
+ int32_t interBytes; // inter result buffer size
+
+ int16_t colType; // table column type
+ int16_t colBytes; // table column bytes
+
+ int16_t numOfParams; // argument value of each function
+ tVariant param[3]; // parameters are not more than 3
+ int32_t offset; // sub result column value of arithmetic expression.
+ int16_t resColId; // result column id
+
+ SColumnFilterList flist;
+} SSqlExpr;
+
+typedef struct SExprInfo {
+ SSqlExpr base;
+ struct tExprNode *pExpr;
+} SExprInfo;
+
#define TSDB_DB_NAME_T 1
#define TSDB_TABLE_NAME_T 2
@@ -63,10 +92,6 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len);
void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
-//SSchema tGetTbnameColumnSchema();
-
-SSchema tGetBlockDistColumnSchema();
-
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name);
bool tscValidateTableNameLength(size_t len);
diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c
index 1cb667d259f040cfab0656562f7c97444fc48d8a..3779303e1a41275996c52f828d433d2d68805fdf 100644
--- a/src/common/src/tarithoperator.c
+++ b/src/common/src/tarithoperator.c
@@ -18,7 +18,58 @@
#include "ttype.h"
#include "tutil.h"
#include "tarithoperator.h"
+#include "tcompare.h"
+//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
+#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
+ { \
+ int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
+ int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \
+ \
+ if ((len1) == (len2)) { \
+ for (; i < (len2) && i >= 0; i += step, (out) += 1) { \
+ if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ *(out) = (double)(left)[i] op(right)[i]; \
+ } \
+ } else if ((len1) == 1) { \
+ for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
+ if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ *(out) = (double)(left)[0] op(right)[i]; \
+ } \
+ } else if ((len2) == 1) { \
+ for (; i >= 0 && i < (len1); i += step, (out) += 1) { \
+ if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
+ *(out) = (double)(left)[i] op(right)[0]; \
+ } \
+ } \
+ }
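+
+// Zero-divisor sketch (illustrative, hypothetical helper): the guard above
+// widens the right operand to double and tests it against 0.0 with the
+// library comparator, so a zero divisor yields a NULL output cell instead
+// of an inf/NaN result.
+static bool tdExampleIsZeroDivisor(double v) {
+  double z = 0.0;
+  return getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0;
+}
+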
#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
{ \
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
@@ -62,6 +113,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
*(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \
} \
} else if (len1 == 1) { \
@@ -70,6 +127,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
*(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \
} \
} else if ((len2) == 1) { \
@@ -78,6 +141,12 @@
SET_DOUBLE_NULL(out); \
continue; \
} \
+ double v, z = 0.0; \
+ GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
+ SET_DOUBLE_NULL(out); \
+ continue; \
+ } \
*(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \
} \
} \
@@ -90,7 +159,7 @@
#define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \
ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord)
#define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord)
+ ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord)
#define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \
ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord)
@@ -2569,6 +2638,7 @@ _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) {
case TSDB_BINARY_OP_REMAINDER:
return vectorRemainder;
default:
+ assert(0);
return NULL;
}
}
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 335c2b0a9265c4a6e834ecca8946c7df44e5d4d0..731dfecfb663be11851857a091e10381cae761ee 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -17,9 +17,33 @@
#include "talgo.h"
#include "tcoding.h"
#include "wchar.h"
+#include "tarray.h"
+static void dataColSetNEleNull(SDataCol *pCol, int nEle);
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
- int limit2, int tRows);
+ int limit2, int tRows, bool forceSetNull);
+
+int tdAllocMemForCol(SDataCol *pCol, int maxPoints) {
+ int spaceNeeded = pCol->bytes * maxPoints;
+ if(IS_VAR_DATA_TYPE(pCol->type)) {
+ spaceNeeded += sizeof(VarDataOffsetT) * maxPoints;
+ }
+ if(pCol->spaceSize < spaceNeeded) {
+ void* ptr = realloc(pCol->pData, spaceNeeded);
+ if(ptr == NULL) {
+ uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)spaceNeeded,
+ strerror(errno));
+ return -1;
+ } else {
+ pCol->pData = ptr;
+ pCol->spaceSize = spaceNeeded;
+ }
+ }
+ if(IS_VAR_DATA_TYPE(pCol->type)) {
+ pCol->dataOff = POINTER_SHIFT(pCol->pData, pCol->bytes * maxPoints);
+ }
+ return 0;
+}
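+
+// Layout sketch (illustrative, hypothetical helper): for var-length types the
+// offset array lives at the tail of the same allocation as the cell data, so
+// one realloc resizes both. The invariant holds right after tdAllocMemForCol
+// with the same maxPoints.
+static void tdExampleCheckColLayout(SDataCol *pCol, int maxPoints) {
+  if (IS_VAR_DATA_TYPE(pCol->type)) {
+    ASSERT((char *)pCol->dataOff == (char *)pCol->pData + pCol->bytes * maxPoints);
+  }
+}
+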
/**
* Duplicate the schema and return a new object
@@ -114,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
- if (pBuilder->columns == NULL) return -1;
+ STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
+ if (columns == NULL) return -1;
+ pBuilder->columns = columns;
}
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
@@ -198,30 +223,39 @@ SDataRow tdDataRowDup(SDataRow row) {
return trow;
}
-void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) {
+SMemRow tdMemRowDup(SMemRow row) {
+ SMemRow trow = malloc(memRowTLen(row));
+ if (trow == NULL) return NULL;
+
+ memRowCpy(trow, row);
+ return trow;
+}
+
+void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->type = colType(pCol);
pDataCol->colId = colColId(pCol);
pDataCol->bytes = colBytes(pCol);
pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE;
pDataCol->len = 0;
- if (IS_VAR_DATA_TYPE(pDataCol->type)) {
- pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
- pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
- pDataCol->spaceSize = pDataCol->bytes * maxPoints;
- *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize + sizeof(VarDataOffsetT) * maxPoints);
- } else {
- pDataCol->spaceSize = pDataCol->bytes * maxPoints;
- pDataCol->dataOff = NULL;
- pDataCol->pData = *pBuf;
- *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize);
- }
}
-
// the value for a timestamp column should be TKEY here instead of TSKEY
-void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) {
+int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
+ if (isAllRowsNull(pCol)) {
+ if (isNull(value, pCol->type)) {
+ // all null value yet, just return
+ return 0;
+ }
+
+ if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
+ if (numOfRows > 0) {
+ // found the first non-NULL value; fill all previous rows with NULL
+ dataColSetNEleNull(pCol, numOfRows);
+ }
+ }
+
if (IS_VAR_DATA_TYPE(pCol->type)) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
@@ -234,16 +268,26 @@ void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints)
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
}
+ return 0;
+}
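+
+// Usage sketch (illustrative, hypothetical helper): NULL-only appends cost
+// nothing until the first real value arrives, at which point the column is
+// allocated and rows 0..numOfRows-1 are back-filled as NULL.
+static int tdExampleLazyAppend(SDataCol *pCol, const void *v, int maxPoints) {
+  const void *nullVal = getNullValue(pCol->type);
+  if (dataColAppendVal(pCol, nullVal, 0, maxPoints) < 0) return -1;  // fast path, len stays 0
+  if (dataColAppendVal(pCol, nullVal, 1, maxPoints) < 0) return -1;  // still all-NULL
+  return dataColAppendVal(pCol, v, 2, maxPoints);                    // alloc + back-fill rows 0..1
+}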
+
+static FORCE_INLINE const void *tdGetColDataOfRowUnsafe(SDataCol *pCol, int row) {
+ if (IS_VAR_DATA_TYPE(pCol->type)) {
+ return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
+ } else {
+ return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
+ }
}
bool isNEleNull(SDataCol *pCol, int nEle) {
+ if(isAllRowsNull(pCol)) return true;
for (int i = 0; i < nEle; i++) {
- if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
+ if (!isNull(tdGetColDataOfRowUnsafe(pCol, i), pCol->type)) return false;
}
return true;
}
-void dataColSetNullAt(SDataCol *pCol, int index) {
+static FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->dataOff[index] = pCol->len;
char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
@@ -255,8 +299,7 @@ void dataColSetNullAt(SDataCol *pCol, int index) {
}
}
-void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
-
+static void dataColSetNEleNull(SDataCol *pCol, int nEle) {
if (IS_VAR_DATA_TYPE(pCol->type)) {
pCol->len = 0;
for (int i = 0; i < nEle; i++) {
@@ -282,7 +325,7 @@ void dataColSetOffset(SDataCol *pCol, int nEle) {
}
}
-SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
+SDataCols *tdNewDataCols(int maxCols, int maxRows) {
SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols));
if (pCols == NULL) {
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
@@ -290,6 +333,9 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
}
pCols->maxPoints = maxRows;
+ pCols->maxCols = maxCols;
+ pCols->numOfRows = 0;
+ pCols->numOfCols = 0;
if (maxCols > 0) {
pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol));
@@ -299,20 +345,12 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
tdFreeDataCols(pCols);
return NULL;
}
-
- pCols->maxCols = maxCols;
- }
-
- pCols->maxRowSize = maxRowSize;
- pCols->bufSize = maxRowSize * maxRows;
-
- if (pCols->bufSize > 0) {
- pCols->buf = malloc(pCols->bufSize);
- if (pCols->buf == NULL) {
- uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols,
- strerror(errno));
- tdFreeDataCols(pCols);
- return NULL;
+ int i;
+ for(i = 0; i < maxCols; i++) {
+ pCols->cols[i].spaceSize = 0;
+ pCols->cols[i].len = 0;
+ pCols->cols[i].pData = NULL;
+ pCols->cols[i].dataOff = NULL;
}
}
@@ -320,42 +358,49 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
}
int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
- if (schemaNCols(pSchema) > pCols->maxCols) {
+ int i;
+ int oldMaxCols = pCols->maxCols;
+ if (schemaNCols(pSchema) > oldMaxCols) {
pCols->maxCols = schemaNCols(pSchema);
- pCols->cols = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
- if (pCols->cols == NULL) return -1;
- }
-
- if (schemaTLen(pSchema) > pCols->maxRowSize) {
- pCols->maxRowSize = schemaTLen(pSchema);
- pCols->bufSize = schemaTLen(pSchema) * pCols->maxPoints;
- pCols->buf = realloc(pCols->buf, pCols->bufSize);
- if (pCols->buf == NULL) return -1;
+ void* ptr = (SDataCol *)realloc(pCols->cols, sizeof(SDataCol) * pCols->maxCols);
+ if (ptr == NULL) return -1;
+ pCols->cols = ptr;
+ for(i = oldMaxCols; i < pCols->maxCols; i++) {
+ pCols->cols[i].pData = NULL;
+ pCols->cols[i].dataOff = NULL;
+ pCols->cols[i].spaceSize = 0;
+ }
}
tdResetDataCols(pCols);
pCols->numOfCols = schemaNCols(pSchema);
- void *ptr = pCols->buf;
- for (int i = 0; i < schemaNCols(pSchema); i++) {
- dataColInit(pCols->cols + i, schemaColAt(pSchema, i), &ptr, pCols->maxPoints);
- ASSERT((char *)ptr - (char *)(pCols->buf) <= pCols->bufSize);
+ for (i = 0; i < schemaNCols(pSchema); i++) {
+ dataColInit(pCols->cols + i, schemaColAt(pSchema, i), pCols->maxPoints);
}
return 0;
}
SDataCols *tdFreeDataCols(SDataCols *pCols) {
+ int i;
if (pCols) {
- tfree(pCols->buf);
- tfree(pCols->cols);
+ if(pCols->cols) {
+ int maxCols = pCols->maxCols;
+ for(i = 0; i < maxCols; i++) {
+ SDataCol *pCol = &pCols->cols[i];
+ tfree(pCol->pData);
+ }
+ free(pCols->cols);
+ pCols->cols = NULL;
+ }
free(pCols);
}
return NULL;
}
SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
- SDataCols *pRet = tdNewDataCols(pDataCols->maxRowSize, pDataCols->maxCols, pDataCols->maxPoints);
+ SDataCols *pRet = tdNewDataCols(pDataCols->maxCols, pDataCols->maxPoints);
if (pRet == NULL) return NULL;
pRet->numOfCols = pDataCols->numOfCols;
@@ -368,21 +413,17 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].bytes = pDataCols->cols[i].bytes;
pRet->cols[i].offset = pDataCols->cols[i].offset;
- pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize;
- pRet->cols[i].pData = (void *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf)));
-
- if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
- ASSERT(pDataCols->cols[i].dataOff != NULL);
- pRet->cols[i].dataOff =
- (int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf)));
- }
-
if (keepData) {
- pRet->cols[i].len = pDataCols->cols[i].len;
if (pDataCols->cols[i].len > 0) {
+ if(tdAllocMemForCol(&pRet->cols[i], pRet->maxPoints) < 0) {
+ tdFreeDataCols(pRet);
+ return NULL;
+ }
+ pRet->cols[i].len = pDataCols->cols[i].len;
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
- memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints);
+ int dataOffSize = sizeof(VarDataOffsetT) * pDataCols->maxPoints;
+ memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, dataOffSize);
}
}
}
@@ -400,72 +441,117 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
-void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
+static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
int dcol = 0;
- if (dataRowDeleted(row)) {
- for (; dcol < pCols->numOfCols; dcol++) {
- SDataCol *pDataCol = &(pCols->cols[dcol]);
- if (dcol == 0) {
- dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints);
- } else {
- dataColSetNullAt(pDataCol, pCols->numOfRows);
- }
+ while (dcol < pCols->numOfCols) {
+ bool setCol = 0;
+ SDataCol *pDataCol = &(pCols->cols[dcol]);
+ if (rcol >= schemaNCols(pSchema)) {
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ dcol++;
+ continue;
}
- } else {
- while (dcol < pCols->numOfCols) {
- SDataCol *pDataCol = &(pCols->cols[dcol]);
- if (rcol >= schemaNCols(pSchema)) {
- dataColSetNullAt(pDataCol, pCols->numOfRows);
- dcol++;
- continue;
+
+ STColumn *pRowCol = schemaColAt(pSchema, rcol);
+ if (pRowCol->colId == pDataCol->colId) {
+ void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
+ if(!isNull(value, pDataCol->type)) setCol = 1;
+ dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+ dcol++;
+ rcol++;
+ } else if (pRowCol->colId < pDataCol->colId) {
+ rcol++;
+ } else {
+ if(forceSetNull || setCol) {
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
+ dcol++;
+ }
+ }
+ pCols->numOfRows++;
+}
+
+static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+ ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
- STColumn *pRowCol = schemaColAt(pSchema, rcol);
- if (pRowCol->colId == pDataCol->colId) {
- void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
- dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
- dcol++;
- rcol++;
- } else if (pRowCol->colId < pDataCol->colId) {
- rcol++;
- } else {
- dataColSetNullAt(pDataCol, pCols->numOfRows);
- dcol++;
+ int rcol = 0;
+ int dcol = 0;
+
+ int nRowCols = kvRowNCols(row);
+
+ while (dcol < pCols->numOfCols) {
+ bool setCol = 0;
+ SDataCol *pDataCol = &(pCols->cols[dcol]);
+ if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ ++dcol;
+ continue;
+ }
+
+ SColIdx *colIdx = kvRowColIdxAt(row, rcol);
+
+ if (colIdx->colId == pDataCol->colId) {
+ void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
+ if(!isNull(value, pDataCol->type)) setCol = 1;
+ dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+ ++dcol;
+ ++rcol;
+ } else if (colIdx->colId < pDataCol->colId) {
+ ++rcol;
+ } else {
+ if (forceSetNull || setCol) {
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
+ ++dcol;
}
}
pCols->numOfRows++;
}
-int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+ if (isDataRow(row)) {
+ tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
+ } else if (isKvRow(row)) {
+ tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
+ } else {
+ ASSERT(0);
+ }
+}
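+
+// Usage sketch (illustrative, hypothetical helper): append a batch of memory
+// rows into column form; forceSetNull=true writes explicit NULLs for columns
+// a row lacks, false leaves them for a later merge step to resolve.
+static void tdExampleAppendRows(SMemRow *rows, int nRows, STSchema *pSchema, SDataCols *pCols) {
+  for (int i = 0; i < nRows; ++i) {
+    tdAppendMemRowToDataCol(rows[i], pSchema, pCols, true);
+  }
+}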
+
+int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols);
+ int offset = 0;
+
+ if (pOffset == NULL) {
+ pOffset = &offset;
+ }
SDataCols *pTarget = NULL;
- if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap
+ if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
- if (source->cols[j].len > 0) {
- dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows,
+ if (source->cols[j].len > 0 || target->cols[j].len > 0) {
+ dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
target->maxPoints);
}
}
target->numOfRows++;
}
+ (*pOffset) += rowsToMerge;
} else {
pTarget = tdDupDataCols(target, true);
if (pTarget == NULL) goto _err;
int iter1 = 0;
- int iter2 = 0;
- tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows,
- pTarget->numOfRows + rowsToMerge);
+ tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows,
+ pTarget->numOfRows + rowsToMerge, forceSetNull);
}
tdFreeDataCols(pTarget);
@@ -478,7 +564,7 @@ _err:
// src2 data has more priority than src1
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
- int limit2, int tRows) {
+ int limit2, int tRows, bool forceSetNull) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
@@ -495,7 +581,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if (key1 < key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
- if (src1->cols[i].len > 0) {
+ if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
}
@@ -507,9 +593,14 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
- if (src2->cols[i].len > 0) {
+ if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
+ } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
+ dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
+ target->maxPoints);
+ } else if(target->cols[i].len > 0) {
+ dataColSetNullAt(&target->cols[i], target->numOfRows);
}
}
target->numOfRows++;
@@ -681,3 +772,97 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
return row;
}
+
+SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2) {
+#if 0
+ ASSERT(memRowKey(row1) == memRowKey(row2));
+ ASSERT(schemaVersion(pSchema1) == memRowVersion(row1));
+ ASSERT(schemaVersion(pSchema2) == memRowVersion(row2));
+ ASSERT(schemaVersion(pSchema1) >= schemaVersion(pSchema2));
+#endif
+
+ SArray *stashRow = taosArrayInit(pSchema1->numOfCols, sizeof(SColInfo));
+ if (stashRow == NULL) {
+ return NULL;
+ }
+
+ SMemRow pRow = buffer;
+ SDataRow dataRow = memRowDataBody(pRow);
+ memRowSetType(pRow, SMEM_ROW_DATA);
+ dataRowSetVersion(dataRow, schemaVersion(pSchema1)); // use latest schema version
+ dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pSchema1->flen));
+
+ TDRowTLenT dataLen = 0, kvLen = TD_MEM_ROW_KV_HEAD_SIZE;
+
+ int32_t i = 0; // row1
+ int32_t j = 0; // row2
+ int32_t nCols1 = schemaNCols(pSchema1);
+ int32_t nCols2 = schemaNCols(pSchema2);
+ SColInfo colInfo = {0};
+ int32_t kvIdx1 = 0, kvIdx2 = 0;
+
+ while (i < nCols1) {
+ STColumn *pCol = schemaColAt(pSchema1, i);
+ void * val1 = tdGetMemRowDataOfColEx(row1, pCol->colId, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset, &kvIdx1);
+ // if val1 != NULL, use val1;
+ if (val1 != NULL && !isNull(val1, pCol->type)) {
+ tdAppendColVal(dataRow, val1, pCol->type, pCol->offset);
+ kvLen += tdGetColAppendLen(SMEM_ROW_KV, val1, pCol->type);
+ setSColInfo(&colInfo, pCol->colId, pCol->type, val1);
+ taosArrayPush(stashRow, &colInfo);
+ ++i; // next col
+ continue;
+ }
+
+ void *val2 = NULL;
+ while (j < nCols2) {
+ STColumn *tCol = schemaColAt(pSchema2, j);
+ if (tCol->colId < pCol->colId) {
+ ++j;
+ continue;
+ }
+ if (tCol->colId == pCol->colId) {
+ val2 = tdGetMemRowDataOfColEx(row2, tCol->colId, tCol->type, TD_DATA_ROW_HEAD_SIZE + tCol->offset, &kvIdx2);
+ } else if (tCol->colId > pCol->colId) {
+ // set NULL
+ }
+ break;
+    }  // end of while(j < nCols2)
+    if (val2 == NULL) {
+      val2 = getNullValue(pCol->type);
+    }
+ tdAppendColVal(dataRow, val2, pCol->type, pCol->offset);
+ if (!isNull(val2, pCol->type)) {
+ kvLen += tdGetColAppendLen(SMEM_ROW_KV, val2, pCol->type);
+ setSColInfo(&colInfo, pCol->colId, pCol->type, val2);
+ taosArrayPush(stashRow, &colInfo);
+ }
+
+ ++i; // next col
+ }
+
+ dataLen = memRowTLen(pRow);
+
+ if (kvLen < dataLen) {
+ // scan stashRow and generate SKVRow
+ memset(buffer, 0, dataLen); // zero the full data-row image before rebuilding it in place as a KV row
+ SMemRow tRow = buffer;
+ memRowSetType(tRow, SMEM_ROW_KV);
+ SKVRow kvRow = (SKVRow)memRowKvBody(tRow);
+ int16_t nKvNCols = (int16_t) taosArrayGetSize(stashRow);
+ kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nKvNCols));
+ kvRowSetNCols(kvRow, nKvNCols);
+ memRowSetKvVersion(tRow, pSchema1->version);
+
+ int32_t toffset = 0;
+ int16_t k;
+ for (k = 0; k < nKvNCols; ++k) {
+ SColInfo *pColInfo = taosArrayGet(stashRow, k);
+ tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset);
+ toffset += sizeof(SColIdx);
+ }
+ ASSERT(kvLen == memRowTLen(tRow));
+ }
+ taosArrayDestroy(stashRow);
+ return buffer;
+}
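+
+// Usage sketch (illustrative, hypothetical helper): the caller supplies the
+// scratch buffer; memRowMaxBytesFromSchema(pSchema1) is assumed here to be a
+// sufficient upper bound for the merged data-row form.
+static SMemRow tdExampleMergeRows(SMemRow rowNew, SMemRow rowOld, STSchema *pS1, STSchema *pS2) {
+  void *buffer = malloc(memRowMaxBytesFromSchema(pS1));
+  if (buffer == NULL) return NULL;
+  // the returned row aliases this buffer; the caller frees it
+  return mergeTwoMemRows(buffer, rowNew, rowOld, pS1, pS2);
+}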
diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c
index 1008c4cf8f77ca77f59a57aea189cdebef9c9129..973b88fef9357b5e4b17ad0f9266d18cb98c2b1a 100644
--- a/src/common/src/texpr.c
+++ b/src/common/src/texpr.c
@@ -13,8 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include
#include "os.h"
+#include "texpr.h"
#include "exception.h"
#include "taosdef.h"
#include "taosmsg.h"
@@ -145,25 +147,25 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
*pExpr = NULL;
}
-bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) {
+bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) {
tExprNode *pLeft = pExpr->_node.pLeft;
tExprNode *pRight = pExpr->_node.pRight;
//non-leaf nodes, recursively traverse the expression tree in the post-root order
if (pLeft->nodeType == TSQL_NODE_EXPR && pRight->nodeType == TSQL_NODE_EXPR) {
if (pExpr->_node.optr == TSDB_RELATION_OR) { // or
- if (exprTreeApplayFilter(pLeft, pItem, param)) {
+ if (exprTreeApplyFilter(pLeft, pItem, param)) {
return true;
}
// left child does not satisfy the query condition, try right child
- return exprTreeApplayFilter(pRight, pItem, param);
+ return exprTreeApplyFilter(pRight, pItem, param);
} else { // and
- if (!exprTreeApplayFilter(pLeft, pItem, param)) {
+ if (!exprTreeApplyFilter(pLeft, pItem, param)) {
return false;
}
- return exprTreeApplayFilter(pRight, pItem, param);
+ return exprTreeApplyFilter(pRight, pItem, param);
}
}
@@ -463,3 +465,66 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
CLEANUP_EXECUTE_TO(anchor, false);
return expr;
}
+
+void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
+ SBufferReader br = tbufInitReader(buf, len, false);
+ uint32_t type = tbufReadUint32(&br);
+ SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(type), true, false);
+
+ taosHashSetEqualFp(pObj, taosGetDefaultEqualFunction(type));
+
+ int dummy = -1;
+ int32_t sz = tbufReadInt32(&br);
+ for (int32_t i = 0; i < sz; i++) {
+ if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) {
+ int64_t val = tbufReadInt64(&br);
+ taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
+ } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
+ uint64_t val = tbufReadUint64(&br);
+ taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
+ }
+ else if (type == TSDB_DATA_TYPE_TIMESTAMP) {
+ int64_t val = tbufReadInt64(&br);
+ taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
+ } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
+ double val = tbufReadDouble(&br);
+ taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
+ } else if (type == TSDB_DATA_TYPE_BINARY) {
+ size_t t = 0;
+ const char *val = tbufReadBinary(&br, &t);
+ taosHashPut(pObj, (char *)val, t, &dummy, sizeof(dummy));
+ } else if (type == TSDB_DATA_TYPE_NCHAR) {
+ size_t t = 0;
+ const char *val = tbufReadBinary(&br, &t);
+ taosHashPut(pObj, (char *)val, t, &dummy, sizeof(dummy));
+ }
+ }
+ *q = (void *)pObj;
+}
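+
+// Usage sketch (illustrative, hypothetical helper): probe the resulting set;
+// keys are the raw value bytes and the payload is a dummy marker.
+static bool tdExampleInSet(const char *buf, int32_t len, int64_t v) {
+  void *set = NULL;
+  buildFilterSetFromBinary(&set, buf, len);
+  bool hit = taosHashGet((SHashObj *)set, (char *)&v, sizeof(v)) != NULL;
+  taosHashCleanup((SHashObj *)set);
+  return hit;
+}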
+
+tExprNode* exprdup(tExprNode* pNode) {
+ if (pNode == NULL) {
+ return NULL;
+ }
+
+ tExprNode* pCloned = calloc(1, sizeof(tExprNode));
+ if (pNode->nodeType == TSQL_NODE_EXPR) {
+ tExprNode* pLeft = exprdup(pNode->_node.pLeft);
+ tExprNode* pRight = exprdup(pNode->_node.pRight);
+
+ pCloned->_node.pLeft = pLeft;
+ pCloned->_node.pRight = pRight;
+ pCloned->_node.optr = pNode->_node.optr;
+ pCloned->_node.hasPK = pNode->_node.hasPK;
+ } else if (pNode->nodeType == TSQL_NODE_VALUE) {
+ pCloned->pVal = calloc(1, sizeof(tVariant));
+ tVariantAssign(pCloned->pVal, pNode->pVal);
+ } else if (pNode->nodeType == TSQL_NODE_COL) {
+ pCloned->pSchema = calloc(1, sizeof(SSchema));
+ *pCloned->pSchema = *pNode->pSchema;
+ }
+
+ pCloned->nodeType = pNode->nodeType;
+ return pCloned;
+}
+
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 198d57b6b99badbf1d8d2c501bef20183b34a1d3..8ab24bdde2a2cc7063f0c70efe56ede811fc139d 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -25,6 +25,7 @@
#include "tutil.h"
#include "tlocale.h"
#include "ttimezone.h"
+#include "tcompare.h"
// TSDB
bool tsdbForceKeepFile = false;
@@ -52,6 +53,7 @@ int32_t tsDnodeId = 0;
// common
int32_t tsRpcTimer = 300;
int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; // disabled by default: query and show commands use the UDP protocol unless this is set
int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
@@ -77,17 +79,21 @@ int32_t tsCompressMsgSize = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
+int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp
-int32_t tsMaxNumOfOrderedResults = 100000;
+int32_t tsMaxNumOfOrderedResults = 1000000;
// 10 ms for sliding time; the value will be adjusted if the time precision changes
int32_t tsMinSlidingTime = 10;
-// 10 ms for interval time range, changed accordingly
-int32_t tsMinIntervalTime = 10;
+// the maximum number of distinct query results
+int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
+
+// 1 us for interval time range, changed accordingly
+int32_t tsMinIntervalTime = 1;
// 20sec, the maximum value of stream computing delay, changed accordingly
int32_t tsMaxStreamComputDelay = 20000;
@@ -159,6 +165,7 @@ int32_t tsHttpMaxThreads = 2;
int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0;
+int8_t tsHttpDbNameMandatory = 0;
// mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
@@ -179,17 +186,21 @@ int32_t tsMonitorInterval = 30; // seconds
int8_t tsEnableStream = 1;
// internal
+int8_t tsCompactMnodeWal = 0;
int8_t tsPrintAuth = 0;
int8_t tscEmbedded = 0;
-char configDir[TSDB_FILENAME_LEN] = {0};
-char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
-char tsDataDir[TSDB_FILENAME_LEN] = {0};
-char tsScriptDir[TSDB_FILENAME_LEN] = {0};
-char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
+char configDir[PATH_MAX] = {0};
+char tsVnodeDir[PATH_MAX] = {0};
+char tsDnodeDir[PATH_MAX] = {0};
+char tsMnodeDir[PATH_MAX] = {0};
+char tsMnodeTmpDir[PATH_MAX] = {0};
+char tsMnodeBakDir[PATH_MAX] = {0};
+char tsDataDir[PATH_MAX] = {0};
+char tsScriptDir[PATH_MAX] = {0};
+char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0;
+int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
SDiskCfg tsDiskCfg[1];
@@ -203,7 +214,7 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS];
* TSDB_TIME_PRECISION_MICRO: 86400000000L
* TSDB_TIME_PRECISION_NANO: 86400000000000L
*/
-int64_t tsMsPerDay[] = {86400000L, 86400000000L, 86400000000000L};
+int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L};
// system info
char tsOsName[10] = "Linux";
@@ -243,6 +254,19 @@ int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 131;
int32_t fsDebugFlag = 135;
+#ifdef TD_TSZ
+//
+// lossy compress 6
+//
+char lossyColumns[32] = ""; // e.g. "float|double" means all float and double columns can be lossy compressed; leave empty to disable lossy compression
+// below option can take effect when tsLossyColumns not empty
+double fPrecision = 1E-8; // float column precision
+double dPrecision = 1E-16; // double column precision
+uint32_t maxRange = 500; // max range
+uint32_t curRange = 100; // range
+char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
+#endif
+
int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL;
void (*monExecuteSQLFp)(char *sql) = NULL;
@@ -527,6 +551,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "maxNumOfDistinctRes";
+ cfg.ptr = &tsMaxNumOfDistinctResults;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 10*10000;
+ cfg.maxValue = 10000*10000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -629,6 +663,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_MS;
taosInitConfigOption(cfg);
+ cfg.option = "rpcForceTcp";
+ cfg.ptr = &tsRpcForceTcp;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "rpcMaxTime";
cfg.ptr = &tsRpcMaxTime;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -945,7 +989,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
- cfg.maxValue = 10000000;
+ cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -960,12 +1004,22 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);
+ cfg.option = "maxWildCardsLength";
+ cfg.ptr = &tsMaxWildCardsLen;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = TSDB_MAX_FIELD_LEN;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_BYTE;
+ taosInitConfigOption(cfg);
+
cfg.option = "maxNumOfOrderedRes";
cfg.ptr = &tsMaxNumOfOrderedResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
- cfg.minValue = TSDB_MAX_SQL_LEN;
- cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN;
+ cfg.minValue = 100000;
+ cfg.maxValue = 100000000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
@@ -1164,6 +1218,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "topicBinaryLen";
+ cfg.ptr = &tsTopicBianryLen;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 16;
+ cfg.maxValue = 16000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1204,6 +1268,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "httpDbNameMandatory";
+ cfg.ptr = &tsHttpDbNameMandatory;
+ cfg.valType = TAOS_CFG_VTYPE_INT8;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// debug flag
cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines;
@@ -1506,6 +1580,63 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = tListLen(tsTempDir);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+
+#ifdef TD_TSZ
+ // lossy compress
+ cfg.option = "lossyColumns";
+ cfg.ptr = lossyColumns;
+ cfg.valType = TAOS_CFG_VTYPE_STRING;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 0;
+ cfg.ptrLength = tListLen(lossyColumns);
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
+ cfg.option = "fPrecision";
+ cfg.ptr = &fPrecision;
+ cfg.valType = TAOS_CFG_VTYPE_DOUBLE;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = MIN_FLOAT;
+ cfg.maxValue = MAX_FLOAT;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
+ cfg.option = "dPrecision";
+ cfg.ptr = &dPrecision;
+ cfg.valType = TAOS_CFG_VTYPE_DOUBLE;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = MIN_FLOAT;
+ cfg.maxValue = MAX_FLOAT;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
+ cfg.option = "maxRange";
+ cfg.ptr = &maxRange;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 65536;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
+ cfg.option = "range";
+ cfg.ptr = &curRange;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 65536;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+ assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM);
+#else
+ assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5);
+#endif
+
}
void taosInitGlobalCfg() {
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 65725455e8b35a34f14ede4cbd6e9ef227ed0cfb..5da48b2e9ac9e8bdaf5158ae780379c913275780 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -33,15 +33,6 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len) {
return strlen(prefix);
}
-SSchema tGetBlockDistColumnSchema() {
- SSchema s = {0};
- s.bytes = TSDB_MAX_BINARY_LEN;;
- s.type = TSDB_DATA_TYPE_BINARY;
- s.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
- tstrncpy(s.name, TSQL_BLOCK_DIST_L, TSDB_COL_NAME_LEN);
- return s;
-}
-
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name) {
SSchema s = {0};
@@ -68,6 +59,7 @@ bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
}
+// TODO refactor
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0) {
assert(src == NULL);
@@ -264,7 +256,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) {
return -1;
}
- int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
+ int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
@@ -314,7 +306,7 @@ bool tIsValidName(const SName* name) {
SName* tNameDup(const SName* name) {
assert(name != NULL);
- SName* p = calloc(1, sizeof(SName));
+ SName* p = malloc(sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}
@@ -327,7 +319,7 @@ int32_t tNameGetDbName(const SName* name, char* dst) {
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
- snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
+ snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN, // there is an overwrite risk
"%s.%s", name->acctId, name->dbname);
return 0;
}
diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c
index 6fa27a029bfd5356cca3e34dffe8d3018ade9fd8..ee940531e6672e1c9b30a6db037d62bc8eac922b 100644
--- a/src/common/src/ttypes.c
+++ b/src/common/src/ttypes.c
@@ -405,7 +405,7 @@ bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT;
}
-void setVardataNull(char* val, int32_t type) {
+void setVardataNull(void* val, int32_t type) {
if (type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(val, sizeof(int8_t));
*(uint8_t*) varDataVal(val) = TSDB_DATA_BINARY_NULL;
@@ -417,105 +417,107 @@ void setVardataNull(char* val, int32_t type) {
}
}
-void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
+void setNull(void *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); }
-void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
+void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BOOL_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BOOL_NULL;
}
break;
case TSDB_DATA_TYPE_TINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_TINYINT_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_TINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_SMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_SMALLINT_NULL;
+ *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_SMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_INT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_INT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_INT_NULL;
}
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_BIGINT_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_BIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_UTINYINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint8_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UTINYINT_NULL;
+ *(uint8_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UTINYINT_NULL;
}
break;
case TSDB_DATA_TYPE_USMALLINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint16_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_USMALLINT_NULL;
+ *(uint16_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_USMALLINT_NULL;
}
break;
case TSDB_DATA_TYPE_UINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UINT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UINT_NULL;
}
break;
case TSDB_DATA_TYPE_UBIGINT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_UBIGINT_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_UBIGINT_NULL;
}
break;
case TSDB_DATA_TYPE_FLOAT:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_FLOAT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_FLOAT_NULL;
}
break;
case TSDB_DATA_TYPE_DOUBLE:
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint64_t *)(val + i * tDataTypes[type].bytes) = TSDB_DATA_DOUBLE_NULL;
+ *(uint64_t *)(POINTER_SHIFT(val, i * tDataTypes[type].bytes)) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
for (int32_t i = 0; i < numOfElems; ++i) {
- setVardataNull(val + i * bytes, type);
+ setVardataNull(POINTER_SHIFT(val, i * bytes), type);
}
break;
default: {
for (int32_t i = 0; i < numOfElems; ++i) {
- *(uint32_t *)(val + i * tDataTypes[TSDB_DATA_TYPE_INT].bytes) = TSDB_DATA_INT_NULL;
+ *(uint32_t *)(POINTER_SHIFT(val, i * tDataTypes[TSDB_DATA_TYPE_INT].bytes)) = TSDB_DATA_INT_NULL;
}
break;
}
}
}
-static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
-static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
-static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
-static uint32_t nullInt = TSDB_DATA_INT_NULL;
-static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
-static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
-static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
-static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL;
-static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
-static uint32_t nullIntu = TSDB_DATA_UINT_NULL;
-static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL;
-
-static union {
- tstr str;
- char pad[sizeof(tstr) + 4];
-} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
-
-static void *nullValues[] = {
+static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
+static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
+static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
+static uint32_t nullInt = TSDB_DATA_INT_NULL;
+static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
+static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
+static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
+static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL;
+static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
+static uint32_t nullIntu = TSDB_DATA_UINT_NULL;
+static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL;
+static SBinaryNullT nullBinary = {1, TSDB_DATA_BINARY_NULL};
+static SNCharNullT nullNchar = {4, TSDB_DATA_NCHAR_NULL};
+
+// static union {
+// tstr str;
+// char pad[sizeof(tstr) + 4];
+// } nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
+
+static const void *nullValues[] = {
&nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt,
&nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar,
&nullTinyIntu, &nullSmallIntu, &nullIntu, &nullBigIntu,
};
-void *getNullValue(int32_t type) {
+const void *getNullValue(int32_t type) {
assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT);
return nullValues[type - 1];
}
@@ -632,7 +634,15 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo
}
// the string may be overflow according to errno
- *value = issigned? strtoll(z, &endPtr, radix):strtoull(z, &endPtr, radix);
+ if (!issigned) {
+ const char *p = z;
+ while(*p != 0 && *p == ' ') p++;
+ if (*p != 0 && *p == '-') { return -1;}
+
+ *value = strtoull(z, &endPtr, radix);
+ } else {
+ *value = strtoll(z, &endPtr, radix);
+ }
// not a valid integer number, return error
if (endPtr - z != n || errno == ERANGE) {
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index 3f9e21998348b72989c062ca269f2d59d3ace3cf..42ece19588d357ef551a4ecb9329dea246d2bae4 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -77,6 +77,10 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
pVar->nLen = strRmquote(pVar->pz, token->n);
break;
}
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ pVar->i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
+ break;
+ }
default: { // nType == 0 means the null value
type = TSDB_DATA_TYPE_NULL;
@@ -403,6 +407,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
wchar_t *pWStr = calloc(1, (nLen + 1) * TSDB_NCHAR_SIZE);
bool ret = taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE, NULL);
if (!ret) {
+ tfree(pWStr);
return -1;
}
@@ -602,7 +607,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
}
errno = 0; // reset global error code
- int64_t result;
+ int64_t result = 0;
switch (type) {
case TSDB_DATA_TYPE_BOOL: {
@@ -870,7 +875,8 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
free(pVariant->pz);
pVariant->dKey = v;
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
- pVariant->dKey = (double)(pVariant->i64);
+ double tmp = (double) pVariant->i64;
+ pVariant->dKey = tmp;
}
pVariant->nType = TSDB_DATA_TYPE_DOUBLE;
diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs
index 2c150341f62d16372a99d341a495771e4c2a3dbc..e6c3a598adc0bc4bcf5ea84953f649b418199555 100644
--- a/src/connector/C#/TDengineDriver.cs
+++ b/src/connector/C#/TDengineDriver.cs
@@ -163,5 +163,8 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
+ // get the timestamp precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
}
}
diff --git a/src/connector/go b/src/connector/go
index b8f76da4a708d158ec3cc4b844571dc4414e36b4..050667e5b4d0eafa5387e4283e713559b421203f 160000
--- a/src/connector/go
+++ b/src/connector/go
@@ -1 +1 @@
-Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
+Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin
index a44ec1ca493ad01b2bf825b6418f69e11f548206..32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df 160000
--- a/src/connector/grafanaplugin
+++ b/src/connector/grafanaplugin
@@ -1 +1 @@
-Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
+Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df
diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension
index ce5201014136503d34fecbd56494b67b4961056c..b62a26ecc164a310104df57691691b237e091c89 160000
--- a/src/connector/hivemq-tdengine-extension
+++ b/src/connector/hivemq-tdengine-extension
@@ -1 +1 @@
-Subproject commit ce5201014136503d34fecbd56494b67b4961056c
+Subproject commit b62a26ecc164a310104df57691691b237e091c89
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 2f211e9fba441fa87fb7ecd2ed2236595e70efbe..e432dac1cea593b371a173f334e5313236091ab3 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -12,4 +12,4 @@ IF (TD_MVN_INSTALLED)
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
-ENDIF ()
\ No newline at end of file
+ENDIF ()
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 5a6d007f9a426af0cf6b5bd034e3facc4c8b4d89..6b9fc9d96ce16700ee1243ef7c148a423a965d0b 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -112,19 +112,15 @@
<include>**/*Test.java</include>
- <exclude>**/TimestampPrecisionInNanoInJniTest.java</exclude>
- <exclude>**/NanoSecondTimestampJNITest.java</exclude>
- <exclude>**/NanoSecondTimestampRestfulTest.java</exclude>
<exclude>**/AppMemoryLeakTest.java</exclude>
- <exclude>**/AuthenticationTest.java</exclude>
<exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
<exclude>**/DatetimeBefore1970Test.java</exclude>
<exclude>**/FailOverTest.java</exclude>
<exclude>**/InvalidResultSetPointerTest.java</exclude>
- <exclude>**/RestfulConnectionTest.java</exclude>
<exclude>**/TSDBJNIConnectorTest.java</exclude>
<exclude>**/TaosInfoMonitorTest.java</exclude>
<exclude>**/UnsignedNumberJniTest.java</exclude>
+ <exclude>**/TimeZoneTest.java</exclude>
<testFailureIgnore>true</testFailureIgnore>
diff --git a/src/connector/jdbc/readme.md b/src/connector/jdbc/readme.md
index e81f078c153046265cbe9a856f7b48e26fc071fc..3c52ebb00ad3d6d9d32b9fd1400b0b12facd0576 100644
--- a/src/connector/jdbc/readme.md
+++ b/src/connector/jdbc/readme.md
@@ -1,54 +1,62 @@
+# Java Connector
-## TAOS-JDBCDriver Overview
+TDengine provides `taos-jdbcdriver`, which follows the JDBC standard (3.0) API specification; it can be searched for and downloaded from the maven central repository [Sonatype Repository][1].
-For the convenience of Java applications, TDengine provides `taos-jdbcdriver`, which follows the JDBC standard (3.0) API specification. It can currently be searched for and downloaded via the [Sonatype Repository][1].
+`taos-jdbcdriver` comes in two forms: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported since taos-jdbcdriver-2.0.18). JDBC-JNI works by invoking native methods of the client library libtaos.so (or taos.dll), while JDBC-RESTful wraps the RESTful interface internally.
-Because TDengine is developed in C, the taos-jdbcdriver package depends on the corresponding native library of the operating system.
+[figure: the three ways a Java application accesses TDengine through the connector]
-* libtaos.so
- After TDengine is successfully installed on a linux system, the dependent native library file libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the Linux auto-scan path and needs no separate configuration.
-
-* taos.dll
- After the client is installed on a windows system, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32 and likewise needs no separate configuration.
-
-> Note: when developing on windows you need to install the corresponding windows client of TDengine; since no standalone Linux client is currently provided, TDengine itself must be installed before use.
+The figure above shows the three ways a Java application can use the connector to access TDengine:
-The JDBC driver of TDengine stays as close as possible to relational database drivers, but the differences in objects and technical characteristics between a time-series/spatial database and a relational/object database mean that taos-jdbcdriver does not fully implement the JDBC standard. Note the following when using it:
+* JDBC-JNI: the Java application uses the JDBC-JNI API on physical node 1 (pnode1) and calls the client API (libtaos.so or taos.dll) directly to send write and query requests to the taosd instance on physical node 2 (pnode2).
+* RESTful: the application sends SQL to the RESTful connector on physical node 2 (pnode2), which then calls the client API (libtaos.so).
+* JDBC-RESTful: the Java application uses the JDBC-RESTful API to wrap SQL into a RESTful request and send it to the RESTful connector on physical node 2.
-* TDengine does not provide delete or update operations on individual data records, so the driver has no corresponding methods.
-* Since delete and update are unsupported, transactions are unsupported as well.
-* union operations between tables are currently unsupported.
-* Nested queries are currently unsupported. `For each Connection instance, at most one ResultSet may be open at a time; if a new query is executed before the previous ResultSet is closed, TSDBJDBCDriver automatically closes the previous ResultSet.`
+The JDBC driver of TDengine stays as close as possible to relational database drivers, but the differences in objects and technical characteristics between a time-series/spatial database and a relational/object database mean that `taos-jdbcdriver` also differs somewhat from a traditional JDBC driver. Note the following when using it:
+* TDengine currently does not support deleting individual data records.
+* Transactions are currently unsupported.
+* Nested queries are currently unsupported.
+* For each Connection instance, at most one ResultSet may be open at a time; if a new query is executed before the previous ResultSet is closed, taos-jdbcdriver automatically closes the previous one (see the sketch below).
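+
+A minimal sketch of that last point (the database and table `test.weather` here are hypothetical): issuing a second query on the same statement implicitly closes the first ResultSet.
+
+```java
+Statement stmt = conn.createStatement();
+ResultSet rs1 = stmt.executeQuery("select * from test.weather");
+// executing another query before rs1 is closed will close rs1 automatically:
+ResultSet rs2 = stmt.executeQuery("select count(*) from test.weather");
+// rs1 must no longer be used from this point on
+```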
-## TAOS-JDBCDriver versions, supported TDengine versions and JDK versions
-| taos-jdbcdriver version | TDengine version | JDK version |
-| --- | --- | --- |
-| 1.0.3 | 1.6.1.x and above | 1.8.x |
-| 1.0.2 | 1.6.1.x and above | 1.8.x |
-| 1.0.1 | 1.6.1.x and above | 1.8.x |
+## Comparison of JDBC-JNI and JDBC-RESTful
-## TDengine DataType and Java DataType
+
+| Comparison item | JDBC-JNI | JDBC-RESTful |
+| --- | --- | --- |
+| Supported OS | linux, windows | all platforms |
+| Client installation required | yes | no |
+| Client upgrade required after server upgrade | yes | no |
+| Write performance | | JDBC-RESTful reaches 50%~90% of JDBC-JNI |
+| Query performance | | no difference between JDBC-RESTful and JDBC-JNI |
+
-TDengine currently supports timestamp, number, character, and boolean types, which map to Java types as follows:
+Note: unlike the JNI mode, the RESTful interface is stateless, so the `USE db_name` command has no effect; under RESTful, every reference to a table or super table must be prefixed with its database name, as the sketch below shows.
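+
+A minimal sketch of this constraint (database `test` and table `weather` are hypothetical): over JDBC-RESTful, every table reference carries its database prefix.
+
+```java
+Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://taosdemo.com:6041/?user=root&password=taosdata");
+try (Statement stmt = conn.createStatement()) {
+    // "use test" has no effect over RESTful, so qualify the table name:
+    ResultSet rs = stmt.executeQuery("select * from test.weather limit 10");
+}
+```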
-| TDengine DataType | Java DataType |
-| --- | --- |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT, TINYINT |java.lang.Short |
-| BOOL | java.lang.Boolean |
-| BINARY, NCHAR | java.lang.String |
-
-## How to get TAOS-JDBCDriver
+## How to get taos-jdbcdriver
### maven repository
Currently taos-jdbcdriver is published to the [Sonatype Repository][1], and the major mirrors are all in sync.
+
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@@ -56,56 +64,86 @@ TDengine currently supports timestamp, number, character, and boolean types
In a maven project, simply use the following pom.xml configuration:
```xml
-<dependencies>
-  <dependency>
-    <groupId>com.taosdata.jdbc</groupId>
-    <artifactId>taos-jdbcdriver</artifactId>
-    <version>1.0.3</version>
-  </dependency>
-</dependencies>
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.18</version>
+</dependency>
```
### Build and package from source
-After downloading the [TDengine][3] source code, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to generate the jar package.
+After downloading the [TDengine][3] source code, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package -Dmaven.test.skip=true` to generate the jar package.
+
-## Usage notes
+## Using JDBC
### Get a connection
-A TDengine Connection can be obtained with the following configuration:
+#### Get a connection by specifying the URL
+
+Obtain a connection by specifying the URL, as shown below:
+
+```java
+Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+
+The example above uses the **JDBC-RESTful** driver to establish a connection to hostname taosdemo.com, port 6041, database test. The URL specifies the username (user) as root and the password (password) as taosdata.
+
+Using the JDBC-RESTful interface requires no native library. Compared with JDBC-JNI, you only need to:
+
+1. set driverClass to "com.taosdata.jdbc.rs.RestfulDriver";
+2. start jdbcUrl with "jdbc:TAOS-RS://";
+3. use 6041 as the connection port.
+
+For better write and query performance, a Java application can use the **JDBC-JNI** driver instead, as shown below:
+
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
-String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
+String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
-> Port 6030 is the default connection port; `log` in the JDBC URL is the system's own monitoring database.
-The JDBC URL of TDengine follows the format:
-`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+The example above uses the JDBC-JNI driver to establish a connection to hostname taosdemo.com, port 6030 (the TDengine default port), database test. The URL specifies the username (user) as root and the password (password) as taosdata.
-Here the contents in `{}` are required and those in `[]` optional. The configuration parameters are:
+**Note**: when using the JDBC-JNI driver, the taos-jdbcdriver package depends on the corresponding native library of the operating system.
+* libtaos.so
+ After TDengine is successfully installed on a linux system, the dependent native library file libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the Linux auto-scan path and needs no separate configuration.
+
+* taos.dll
+ After the client is installed on a windows system, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32 and likewise needs no separate configuration.
+
+> When developing on windows, install the corresponding [windows client][14] of TDengine. On a Linux server the client is installed by default along with TDengine; you may also install the [Linux client][15] separately to connect to a remote TDengine Server.
+
+For JDBC-JNI usage, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
+
+The JDBC URL of TDengine follows the format:
+`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
+The configuration parameters in the url are:
* user: TDengine login username, default value root.
* password: the user's login password, default value taosdata.
-* charset: the charset used by the client, defaulting to the system charset.
* cfgdir: directory of the client configuration file, default /etc/taos on Linux OS and C:/TDengine/cfg on Windows OS.
+* charset: the charset used by the client, defaulting to the system charset.
* locale: the client locale, defaulting to the current system locale.
* timezone: the timezone used by the client, defaulting to the current system timezone.
-The above parameters can be configured in 3 places, with `priority from high to low` as follows:
-1. JDBC URL parameters
- As described above, they can be specified in the parameters of the JDBC URL.
-2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
+
+
+#### Get a connection with the URL and Properties
+
+Besides obtaining a connection through the specified URL, a Properties object can supply connection parameters, as shown below:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
- String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
+ // Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+ String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
+ // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
- connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@@ -114,22 +152,68 @@ public Connection getConn() throws Exception{
}
```
-3. the client configuration file taos.cfg
+The example above establishes a connection to hostname taosdemo.com, port 6030, database test; the commented lines show the JDBC-RESTful variant. The url specifies the username (user) as root and the password (password) as taosdata, while connProps specifies the charset, locale, timezone and so on.
+
+The configuration parameters in properties are:
+* TSDBDriver.PROPERTY_KEY_USER: TDengine login username, default value root.
+* TSDBDriver.PROPERTY_KEY_PASSWORD: the user's login password, default value taosdata.
+* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file, default /etc/taos on Linux OS and C:/TDengine/cfg on Windows OS.
+* TSDBDriver.PROPERTY_KEY_CHARSET: the charset used by the client, defaulting to the system charset.
+* TSDBDriver.PROPERTY_KEY_LOCALE: the client locale, defaulting to the current system locale.
+* TSDBDriver.PROPERTY_KEY_TIME_ZONE: the timezone used by the client, defaulting to the current system timezone.
+
+
+
+#### Connect using the client configuration file
+
+When connecting to a TDengine cluster via JDBC-JNI, the client configuration file can be used to specify the cluster's firstEp and secondEp parameters,
+as shown below:
- The default configuration file is /var/lib/taos/taos.cfg on linux systems and C:\TDengine\cfg\taos.cfg on windows systems.
-```properties
-# client default username
-# defaultUser root
+1. Do not specify the hostname and port in the Java application
-# client default password
-# defaultPass taosdata
+```java
+public Connection getConn() throws Exception{
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+}
+```
+
+2. Specify firstEp and secondEp in the configuration file
+
+```
+# first fully qualified domain name (FQDN) for TDengine system
+firstEp cluster_node1:6030
+
+# second fully qualified domain name (FQDN) for TDengine system, for cluster only
+secondEp cluster_node2:6030
# default system charset
-# charset UTF-8
+# charset UTF-8
# system locale
# locale en_US.UTF-8
```
+
+In the example above, jdbc uses the client configuration file to establish a connection to hostname cluster_node1, port 6030, database test. If the firstEp node of the cluster fails, JDBC tries to connect to the cluster via secondEp.
+In TDengine, as long as either firstEp or secondEp is alive, the connection to the cluster can be established normally.
+
+> Note: here the configuration file refers to the one on the machine running the application that calls the JDBC Connector, /etc/taos/taos.cfg by default on Linux OS and C:/TDengine/cfg/taos.cfg on Windows OS.
+
+#### Priority of configuration parameters
+
+With the 3 ways above of obtaining a connection, if a parameter is duplicated among the url, Properties and the client configuration file, its `priority from high to low` is:
+1. JDBC URL parameters, which, as described above, can be specified in the parameters of the JDBC URL.
+2. Properties connProps
+3. the client configuration file taos.cfg
+
+For example: if the url specifies the password as taosdata and Properties specifies it as taosdemo, JDBC uses the password from the url to establish the connection, as sketched below.
+
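+A short sketch of this precedence rule (hostname taosdemo.com reused from the examples above):
+
+```java
+String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; // password set in the URL
+Properties connProps = new Properties();
+connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo"); // lower priority, ignored
+Connection conn = DriverManager.getConnection(jdbcUrl, connProps);  // connects with password "taosdata"
+```
+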
> For more detailed configuration, see [client configuration][13]
### Create databases and tables
@@ -146,6 +230,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
+
> Note: if `use db` is not used to select a database, all subsequent table operations must prefix the table name with the database name, e.g. db.tb.
### Insert data
@@ -156,6 +241,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
+
> now is an internal function that defaults to the current server time.
> `now + 1s` means the current server time plus 1 second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year). A short example follows.
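
As a small illustration of the time-unit arithmetic (reusing the table tb from the insert example above):

```java
// two rows: one at server time, one 500 milliseconds later
stmt.executeUpdate("insert into tb values(now, 23, 10.3)(now + 500a, 21, 9.8)");
```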
@@ -177,8 +263,150 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
+
> Querying works the same as with a relational database; when fetching returned fields by index, counting starts from 1, but fetching by field name is recommended, as the sketch below shows.
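
A small sketch of fetching by column name instead of 1-based index (column names follow the tb example above):

```java
while (resultSet.next()) {
    Timestamp ts = resultSet.getTimestamp("ts");       // clearer than getTimestamp(1)
    int temperature = resultSet.getInt("temperature");
    float humidity = resultSet.getFloat("humidity");
    System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```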
+### Handle exceptions
+
+After an error is raised, the error message and error code can be obtained from the SQLException:
+
+```java
+try (Statement statement = connection.createStatement()) {
+ // executeQuery
+ ResultSet resultSet = statement.executeQuery(sql);
+ // print result
+ printResult(resultSet);
+} catch (SQLException e) {
+ System.out.println("ERROR Message: " + e.getMessage());
+ System.out.println("ERROR Code: " + e.getErrorCode());
+ e.printStackTrace();
+}
+```
+
+The error codes that the JDBC connector may report fall into 3 groups: errors from the JDBC driver itself (error codes between 0x2301 and 0x2350), errors from the JNI methods (between 0x2351 and 0x2400), and errors from other TDengine modules (sketched below).
+For the specific error codes, see:
+* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
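+
+A hedged sketch of acting on the three ranges described above (sql and connection as in the example):
+
+```java
+try (Statement statement = connection.createStatement()) {
+    statement.executeQuery(sql);
+} catch (SQLException e) {
+    int code = e.getErrorCode();
+    if (code >= 0x2301 && code <= 0x2350) {
+        // raised by the JDBC driver itself
+    } else if (code >= 0x2351 && code <= 0x2400) {
+        // raised by the JNI layer
+    } else {
+        // raised by other TDengine modules; see taoserror.h
+    }
+}
+```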
+
+### Write data via parameter binding
+
+Starting from version 2.1.2.0, TDengine's **JDBC-JNI** implementation greatly improves support for parameter binding in data writes (INSERT). Writing this way avoids the cost of SQL parsing and often improves write performance significantly. (Note: the **JDBC-RESTful** implementation does not provide parameter binding.)
+
+```java
+Statement stmt = conn.createStatement();
+Random r = new Random();
+
+// In the INSERT statement, the VALUES part may specify concrete data columns; with automatic table creation, the TAGS part must set values for all TAGS columns:
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
+
+// set the table name:
+s.setTableName("w1");
+// set the TAGS values:
+s.setTagInt(0, r.nextInt(10));
+s.setTagString(1, "Beijing");
+
+int numOfRows = 10;
+
+// the VALUES part is set column by column:
+ArrayList<Long> ts = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+
+ArrayList<Integer> s1 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s1.add(r.nextInt(100));
+}
+s.setInt(1, s1);
+
+ArrayList<String> s2 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s2.add("test" + r.nextInt(100));
+}
+s.setString(2, s2, 10);
+
+// After addBatch the buffer is not cleared. To avoid confusion, binding a new batch of data before executeBatch is not recommended:
+s.columnDataAddBatch();
+// execute the statement with the bound data:
+s.columnDataExecuteBatch();
+// clear the buffer after execution; the current object can then be reused to bind a new batch (possibly a new table name, new TAGS values and new VALUES values):
+s.columnDataClearBatch();
+// when finished, release the resources:
+s.columnDataCloseBatch();
+```
+
+The complete set of methods for setting TAGS values is:
+```java
+public void setTagNull(int index, int type)
+public void setTagBoolean(int index, boolean value)
+public void setTagInt(int index, int value)
+public void setTagByte(int index, byte value)
+public void setTagShort(int index, short value)
+public void setTagLong(int index, long value)
+public void setTagTimestamp(int index, long value)
+public void setTagFloat(int index, float value)
+public void setTagDouble(int index, double value)
+public void setTagString(int index, String value)
+public void setTagNString(int index, String value)
+```
+
+The complete set of methods for setting the VALUES data columns is:
+```java
+public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
+public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
+public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
+public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
+public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
+public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
+public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+```
+Both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition.
+
+### Subscription
+
+#### Create
+
+```java
+TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+```
+
+The three parameters of the `subscribe` method mean:
+
+* topic: the topic (i.e. name) of the subscription; this parameter uniquely identifies the subscription
+* sql: the query statement of the subscription; only `select` statements are allowed, only raw data should be queried, and data may only be queried in ascending time order
+* restart: if the subscription already exists, whether to start over or continue the previous subscription
+
+The example above creates a subscription named `topic` with the SQL statement `select * from meters`; if this subscription already exists, it continues from the previous query progress rather than consuming all data from scratch.
+
+#### Consume data
+
+```java
+int total = 0;
+while(true) {
+ TSDBResultSet rs = sub.consume();
+ int count = 0;
+ while(rs.next()) {
+ count++;
+ }
+ total += count;
+ System.out.printf("%d rows consumed, total %d\n", count, total);
+ Thread.sleep(1000);
+}
+```
+
+The `consume` method returns a result set containing all new data since the last `consume`. Be sure to choose a reasonable `consume` frequency as needed (such as `Thread.sleep(1000)` in the example); otherwise unnecessary pressure is put on the server side.
+
+#### Close the subscription
+
+```java
+sub.close(true);
+```
+
+The `close` method closes a subscription. If its argument is `true`, the subscription progress is kept, and a subscription with the same name can be created later to continue consuming data; if `false`, the progress is not kept.
### Close resources
@@ -187,12 +415,17 @@ resultSet.close();
stmt.close();
conn.close();
```
+
> `Be sure to close the connection`, otherwise a connection leak occurs.
+
+
+
## Use with connection pools
**HikariCP**
* Add the corresponding HikariCP maven dependency:
+
```xml
com.zaxxer
@@ -202,31 +435,34 @@ conn.close();
```
* Usage example:
+
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
+ // jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
-
- config.setMinimumIdle(3); //minimum number of idle connection
+ // connection pool configurations
+ config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
- config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
- config.setIdleTimeout(60000); // max idle time for recycle idle connection
- config.setConnectionTestQuery("describe log.dn"); //validation query
- config.setValidationTimeout(3000); //validation query timeout
+ config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
+ config.setMaxLifetime(0); // maximum life time for each connection
+ config.setIdleTimeout(0); // max idle time for recycle idle connection
+ config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
-
+
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
-
- //query or insert
+
+ //query or insert
// ...
-
+
+ connection.close(); // put back to connection pool
}
```
+
+> After obtaining a connection via HikariDataSource.getConnection(), call close() when done; this does not actually close the connection, it only returns it to the pool.
> For more HikariCP usage questions, see the [official documentation][5]
@@ -243,40 +479,32 @@ conn.close();
```
* Usage example:
+
```java
public static void main(String[] args) throws Exception {
- Properties properties = new Properties();
- properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
- properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
- properties.put("username","root");
- properties.put("password","taosdata");
-
- properties.put("maxActive","10"); //maximum number of connection in the pool
- properties.put("initialSize","3");//initial number of connection
- properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
- properties.put("minIdle","3");//minimum number of connection in the pool
-
- properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
-
- properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
- properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
-
- properties.put("validationQuery","describe log.dn"); //validation query
- properties.put("testWhileIdle","true"); // test connection while idle
- properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
- properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
-
- //create druid datasource
- DataSource ds = DruidDataSourceFactory.createDataSource(properties);
- Connection connection = ds.getConnection(); // get connection
- Statement statement = connection.createStatement(); // get statement
+ DruidDataSource dataSource = new DruidDataSource();
+ // jdbc properties
+ dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+ dataSource.setUrl(url);
+ dataSource.setUsername("root");
+ dataSource.setPassword("taosdata");
+ // pool configurations
+ dataSource.setInitialSize(10);
+ dataSource.setMinIdle(10);
+ dataSource.setMaxActive(10);
+ dataSource.setMaxWait(30000);
+ dataSource.setValidationQuery("select server_status()");
+
+ Connection connection = dataSource.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
+
> For more druid usage questions, see the [official documentation][6]
**Notes**
@@ -291,29 +519,64 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
+
+
## Use with frameworks
* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
* For using it with Springboot + Mybatis, see [springbootdemo][12]
+
+
+## TAOS-JDBCDriver versions, supported TDengine versions and JDK versions
+
+| taos-jdbcdriver version | TDengine version | JDK version |
+| -------------------- | ----------------- | -------- |
+| 2.0.31 | 2.1.3.0 and above | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
+| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
+| 1.0.3 | 1.6.1.x and above | 1.8.x |
+| 1.0.2 | 1.6.1.x and above | 1.8.x |
+| 1.0.1 | 1.6.1.x and above | 1.8.x |
+
+
+
+## TDengine DataType and Java DataType
+
+TDengine currently supports timestamp, number, character, and boolean types, which map to Java types as follows:
+
+| TDengine DataType | Java DataType |
+| ----------------- | ------------------ |
+| TIMESTAMP | java.sql.Timestamp |
+| INT | java.lang.Integer |
+| BIGINT | java.lang.Long |
+| FLOAT | java.lang.Float |
+| DOUBLE | java.lang.Double |
+| SMALLINT | java.lang.Short |
+| TINYINT | java.lang.Byte |
+| BOOL | java.lang.Boolean |
+| BINARY | byte array |
+| NCHAR | java.lang.String |
+
+
+
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
-
+
**Cause**: the program cannot find the dependent native library taos.
-
- **Solution**: on windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on linux, create the following soft link ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
-
+
+ **Solution**: on windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on linux, create the following soft link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
+
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
-
+
**Cause**: currently TDengine only supports 64-bit JDK.
-
+
**Solution**: reinstall a 64-bit JDK.
* For other questions, see [Issues][7]
-
-
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
@@ -324,6 +587,9 @@ Query OK, 1 row(s) in set (0.000141s)
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
-[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
+[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
-[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
\ No newline at end of file
+[13]: https://www.taosdata.com/cn/documentation/administrator/#client
+[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
+[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF
+
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
index 3c9c784f594d6cb022267c2ff1cd848c26f53ac3..7dbb62d8496e9ae9b758c1a6440531e15e352dc9 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java
@@ -77,8 +77,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
- // whether identifiers for objects such as database and table are stored in mixed case
- return false;
+ return false; // whether identifiers for objects such as database and table are stored in mixed case
}
public boolean storesUpperCaseIdentifiers() throws SQLException {
@@ -514,7 +513,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(colIndex);
col6.setColName("TYPE_CAT");
- col6.setColType(Types.NCHAR);
+ col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col6;
}
@@ -522,7 +521,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(colIndex);
col7.setColName("TYPE_SCHEM");
- col7.setColType(Types.NCHAR);
+ col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col7;
}
@@ -530,7 +529,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(colIndex);
col8.setColName("TYPE_NAME");
- col8.setColType(Types.NCHAR);
+ col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col8;
}
@@ -538,7 +537,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(colIndex);
col9.setColName("SELF_REFERENCING_COL_NAME");
- col9.setColType(Types.NCHAR);
+ col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col9;
}
@@ -546,7 +545,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(colIndex);
col10.setColName("REF_GENERATION");
- col10.setColType(Types.NCHAR);
+ col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col10;
}
@@ -592,7 +591,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(colIndex);
col4.setColName("TABLE_TYPE");
- col4.setColType(Types.NCHAR);
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
@@ -734,7 +733,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(colIndex);
col1.setColName("TABLE_CAT");
- col1.setColType(Types.NCHAR);
+ col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col1;
}
@@ -742,7 +741,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(colIndex);
col2.setColName("TABLE_SCHEM");
- col2.setColType(Types.NCHAR);
+ col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col2;
}
@@ -751,7 +750,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
col3.setColIndex(colIndex);
col3.setColName("TABLE_NAME");
col3.setColSize(193);
- col3.setColType(Types.NCHAR);
+ col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col3;
}
@@ -760,7 +759,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
col4.setColIndex(colIndex);
col4.setColName("COLUMN_NAME");
col4.setColSize(65);
- col4.setColType(Types.NCHAR);
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
@@ -768,7 +767,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(colIndex);
col5.setColName("DATA_TYPE");
- col5.setColType(Types.INTEGER);
+ col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col5;
}
@@ -776,7 +775,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("COLUMN_SIZE");
- col7.setColType(Types.INTEGER);
+ col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col7;
}
@@ -791,7 +790,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("DECIMAL_DIGITS");
- col9.setColType(Types.INTEGER);
+ col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col9;
}
@@ -799,7 +798,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("NUM_PREC_RADIX");
- col10.setColType(Types.INTEGER);
+ col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col10;
}
@@ -807,7 +806,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col11 = new ColumnMetaData();
col11.setColIndex(11);
col11.setColName("NULLABLE");
- col11.setColType(Types.INTEGER);
+ col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col11;
}
@@ -815,7 +814,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col12 = new ColumnMetaData();
col12.setColIndex(colIndex);
col12.setColName("REMARKS");
- col12.setColType(Types.NCHAR);
+ col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col12;
}
@@ -823,7 +822,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col13 = new ColumnMetaData();
col13.setColIndex(13);
col13.setColName("COLUMN_DEF");
- col13.setColType(Types.NCHAR);
+ col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col13;
}
@@ -831,7 +830,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col14 = new ColumnMetaData();
col14.setColIndex(14);
col14.setColName("SQL_DATA_TYPE");
- col14.setColType(Types.INTEGER);
+ col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col14;
}
@@ -839,7 +838,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col15 = new ColumnMetaData();
col15.setColIndex(15);
col15.setColName("SQL_DATETIME_SUB");
- col15.setColType(Types.INTEGER);
+ col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col15;
}
@@ -847,7 +846,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col16 = new ColumnMetaData();
col16.setColIndex(16);
col16.setColName("CHAR_OCTET_LENGTH");
- col16.setColType(Types.INTEGER);
+ col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col16;
}
@@ -855,7 +854,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col17 = new ColumnMetaData();
col17.setColIndex(17);
col17.setColName("ORDINAL_POSITION");
- col17.setColType(Types.INTEGER);
+ col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
return col17;
}
@@ -863,7 +862,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col18 = new ColumnMetaData();
col18.setColIndex(18);
col18.setColName("IS_NULLABLE");
- col18.setColType(Types.NCHAR);
+ col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col18;
}
@@ -871,7 +870,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col19 = new ColumnMetaData();
col19.setColIndex(19);
col19.setColName("SCOPE_CATALOG");
- col19.setColType(Types.NCHAR);
+ col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col19;
}
@@ -879,7 +878,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col20 = new ColumnMetaData();
col20.setColIndex(20);
col20.setColName("SCOPE_SCHEMA");
- col20.setColType(Types.NCHAR);
+ col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col20;
}
@@ -887,7 +886,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col21 = new ColumnMetaData();
col21.setColIndex(21);
col21.setColName("SCOPE_TABLE");
- col21.setColType(Types.NCHAR);
+ col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col21;
}
@@ -903,7 +902,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col23 = new ColumnMetaData();
col23.setColIndex(23);
col23.setColName("IS_AUTOINCREMENT");
- col23.setColType(Types.NCHAR);
+ col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col23;
}
@@ -911,7 +910,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col24 = new ColumnMetaData();
col24.setColIndex(24);
col24.setColName("IS_GENERATEDCOLUMN");
- col24.setColType(Types.NCHAR);
+ col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col24;
}
@@ -1205,7 +1204,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(colIndex);
col5.setColName("KEY_SEQ");
- col5.setColType(Types.SMALLINT);
+ col5.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
return col5;
}
@@ -1213,7 +1212,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(colIndex);
col6.setColName("PK_NAME");
- col6.setColType(Types.NCHAR);
+ col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col6;
}
@@ -1275,7 +1274,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(colIndex);
col4.setColName("SUPERTABLE_NAME");
- col4.setColType(Types.NCHAR);
+ col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
return col4;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
index 14e75f0e09e3403e703658fb503019fefbb6156d..8398c8f84bb8b73b09cd6b7e3e2f27a20f28e8f7 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java
@@ -16,7 +16,7 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
- private int colType = 0;
+ private int colType = 0; //taosType
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
index bda3d522123d09ece81384c6eba814c7e548e1ec..db4a5ccaa8fc15aa637363bc3f5e1b34c71dc5be 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java
@@ -68,71 +68,61 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override
public String getString(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getString(columnIndex, nativeType);
+ return rowCursor.getString(columnIndex, colType);
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getBoolean(columnIndex, nativeType);
+ return rowCursor.getBoolean(columnIndex, colType);
}
@Override
public byte getByte(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return (byte) rowCursor.getInt(columnIndex, nativeType);
+ return (byte) rowCursor.getInt(columnIndex, colType);
}
@Override
public short getShort(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return (short) rowCursor.getInt(columnIndex, nativeType);
+ return (short) rowCursor.getInt(columnIndex, colType);
}
@Override
public int getInt(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getInt(columnIndex, nativeType);
+ return rowCursor.getInt(columnIndex, colType);
}
@Override
public long getLong(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getLong(columnIndex, nativeType);
+ return rowCursor.getLong(columnIndex, colType);
}
@Override
public float getFloat(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getFloat(columnIndex, nativeType);
+ return rowCursor.getFloat(columnIndex, colType);
}
@Override
public double getDouble(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getDouble(columnIndex, nativeType);
+ return rowCursor.getDouble(columnIndex, colType);
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return (rowCursor.getString(columnIndex, nativeType)).getBytes();
+ return (rowCursor.getString(columnIndex, colType)).getBytes();
}
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- return rowCursor.getTimestamp(columnIndex, nativeType);
+ return rowCursor.getTimestamp(columnIndex, colType);
}
@Override
@@ -158,8 +148,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet {
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
int colType = columnMetaDataList.get(columnIndex - 1).getColType();
- int nativeType = TSDBConstants.jdbcType2TaosType(colType);
- double value = rowCursor.getDouble(columnIndex, nativeType);
+ double value = rowCursor.getDouble(columnIndex, colType);
return new BigDecimal(value);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
index 740e3c6c21be568bf71e4d68a3129c527da441a6..74a874513839fb076ce3f2dd9b2a6d0ecc72fb2e 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java
@@ -129,8 +129,9 @@ public abstract class TSDBConstants {
return Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
+ default:
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
}
public static String taosType2JdbcTypeName(int taosType) throws SQLException {
@@ -160,7 +161,7 @@ public abstract class TSDBConstants {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type: " + taosType + " in tdengine");
}
}
@@ -187,7 +188,7 @@ public abstract class TSDBConstants {
case Types.NCHAR:
return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
}
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
}
public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException {
@@ -213,7 +214,7 @@ public abstract class TSDBConstants {
case Types.NCHAR:
return "NCHAR";
default:
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE, "unknown sql type: " + jdbcType + " in tdengine");
}
}
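
The hunks above upgrade the unknown-type failures from a bare error code to a message that names the offending value. A minimal caller-side sketch of the effect; it assumes the first hunk's mapping method is taosType2JdbcType (not shown in full here) and uses 99 as an arbitrary invalid type id:

    // sketch only; 99 is not a TSDB_DATA_TYPE_* value
    static void demo() {
        try {
            TSDBConstants.taosType2JdbcType(99);
        } catch (SQLException e) {
            // before: "unknown taos type in tdengine"
            // after:  "... unknown taos type: 99 in tdengine"
            System.err.println(e.getErrorCode() + ": " + e.getMessage());
        }
    }
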
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index f5f16758c1eb6df0a007405412b7f32082dfb026..521a88b128ff930510bf00cdcb6a12cbc3211742 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -14,6 +14,8 @@
*****************************************************************************/
package com.taosdata.jdbc;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.*;
import java.util.logging.Logger;
@@ -127,6 +129,11 @@ public class TSDBDriver extends AbstractDriver {
return null;
}
+ if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
+ if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);
+
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
(String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
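
With this check the JNI driver fails fast when credentials are missing instead of passing an incomplete property set to the native layer. A connection sketch, assuming a reachable local taosd; the property keys and the root/taosdata defaults match the tests later in this diff (fragment belongs in a method that throws SQLException):

    Properties props = new Properties();
    props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
    props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
    // dropping either property now yields ERROR_USER_IS_REQUIRED (0x2319)
    // or ERROR_PASSWORD_IS_REQUIRED (0x231a) before any JNI call happens
    try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/", props)) {
        // use the connection
    }
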
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
index d626698663c648ee8c39bab4d5f7831099ba8c81..bdb3ea410005cadd865de1d9e080dd5b9f20834f 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
@@ -33,18 +33,20 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database, please see taoslog for more details");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection is NULL");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "JNI result set is NULL");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultSet");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed, please see taoslog for more details");
}
public static SQLException createSQLException(int errorCode) {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
index 3c44d69be58c5b124493367e3d2efb8c7d835e53..2207db6f9379595e68b8ed00ea8f7298ca3b45ad 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -29,6 +29,9 @@ public class TSDBErrorNumbers {
public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision
public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317;
public static final int ERROR_RESTFul_Client_IOException = 0x2318;
+ public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required
+ public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required
+
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@@ -67,6 +70,8 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE);
errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION);
errorNumbers.add(ERROR_RESTFul_Client_IOException);
+ errorNumbers.add(ERROR_USER_IS_REQUIRED);
+ errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);
errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
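
Registering the two codes in the known-error set lets callers branch on SQLException.getErrorCode() instead of parsing messages; AuthenticationTest later in this diff asserts exactly this behavior:

    try {
        DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?");
    } catch (SQLException e) {
        if (e.getErrorCode() == TSDBErrorNumbers.ERROR_USER_IS_REQUIRED) {
            // message renders as "ERROR (2319): user is required"
        }
    }
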
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index 4fdbb308c54c23a1fb427f1e9f1530894b0daae1..4a9e80ba53b096f057840eab67e61418332dbf81 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -36,7 +36,6 @@ public class TSDBJNIConnector {
static {
System.loadLibrary("taos");
- System.out.println("java.library.path:" + System.getProperty("java.library.path"));
}
public boolean isClosed() {
@@ -279,25 +278,20 @@ public class TSDBJNIConnector {
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
- long stmt;
- try {
- stmt = prepareStmtImp(sql.getBytes(), this.taos);
- } catch (Exception e) {
- e.printStackTrace();
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
- }
+ long stmt = prepareStmtImp(sql.getBytes(), this.taos);
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "connection already closed");
}
-
if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
}
-
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}
+ if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR);
+ }
return stmt;
}
@@ -314,8 +308,7 @@ public class TSDBJNIConnector {
private native int setBindTableNameImp(long stmt, String name, long conn);
public void setBindTableNameAndTags(long stmt, String tableName, int numOfTags, ByteBuffer tags, ByteBuffer typeList, ByteBuffer lengthList, ByteBuffer nullList) throws SQLException {
- int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(),
- nullList.array(), this.taos);
+ int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(), nullList.array(), this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind table name and corresponding tags");
}
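
prepareStmt now surfaces every JNI sentinel as a dedicated SQLException, including the previously unhandled JNI_TDENGINE_ERROR, rather than masking failures as encoding errors. A caller-side sketch, where connector stands for an assumed, already-connected TSDBJNIConnector:

    try {
        long stmt = connector.prepareStmt("insert into ? values(?, ?)");
        // ... bind parameters against the stmt handle ...
    } catch (SQLException e) {
        // e.g. "connection already closed" for JNI_CONNECTION_NULL, or the
        // "please see taoslog" hint for JNI_TDENGINE_ERROR / JNI_OUT_OF_MEMORY
        System.err.println(e.getErrorCode() + ": " + e.getMessage());
    }
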
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index 6211f61dc505d2ccba5f11f3aacc980771b1a110..ff49677b01fa1c3a4d482cebd51269d5f1589e43 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -32,6 +32,7 @@ import java.util.List;
import com.taosdata.jdbc.utils.NullType;
public class TSDBResultSetBlockData {
+ private static final int BINARY_LENGTH_OFFSET = 2;
private int numOfRows = 0;
private int rowIndex = 0;
@@ -404,10 +405,8 @@ public class TSDBResultSetBlockData {
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
- bb.position(fieldSize * this.rowIndex);
-
+ bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
int length = bb.getShort();
-
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isBinaryNull(dest, length)) {
@@ -419,16 +418,13 @@ public class TSDBResultSetBlockData {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
- bb.position(fieldSize * this.rowIndex);
-
+ bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
int length = bb.getShort();
-
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isNcharNull(dest, length)) {
return null;
}
-
try {
String charset = TaosGlobalConfig.getCharset();
return new String(dest, charset);
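
The position fix accounts for the 2-byte length word that precedes every BINARY/NCHAR cell, so the per-row stride is fieldSize + BINARY_LENGTH_OFFSET rather than fieldSize alone. A standalone sketch of the cell layout this assumes, using only java.nio.ByteBuffer:

    import java.nio.ByteBuffer;

    // each cell: [ int16 payloadLength | up to fieldSize payload bytes ]
    static byte[] readVarCell(ByteBuffer bb, int fieldSize, int rowIndex) {
        final int BINARY_LENGTH_OFFSET = 2;                 // the int16 prefix
        bb.position((fieldSize + BINARY_LENGTH_OFFSET) * rowIndex);
        int length = bb.getShort();                         // this cell's actual length
        byte[] dest = new byte[length];
        bb.get(dest, 0, length);                            // payload only, padding skipped
        return dest;
    }
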
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
index 6292673352529171cdc42ba73e0f47f8f05a21a4..f93384fcc7a9693c2b187498cdf0f60371d28fd0 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java
@@ -110,7 +110,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD
ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1);
switch (columnMetaData.getColType()) {
-
+
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return 5;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
index 12a0ab57e2c35c7f1f550dd213db19a0effd4ebc..e818736096355c4937e5af0470b77c95486c86db 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
@@ -18,7 +18,7 @@ public class RestfulConnection extends AbstractConnection {
private final String url;
private final String database;
private final String token;
- /******************************************************/
+
private boolean isClosed;
private final DatabaseMetaData metadata;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index 9ab67c5502e33d8f2337e3acc8d8eab425992e3a..0a8809e84f92f1e948ea5306648610dfeca57c8f 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -7,6 +7,7 @@ import com.taosdata.jdbc.utils.HttpClientPoolUtil;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.Properties;
import java.util.logging.Logger;
@@ -40,8 +41,13 @@ public class RestfulDriver extends AbstractDriver {
String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
try {
- String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8");
- String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8");
+ if (!props.containsKey(TSDBDriver.PROPERTY_KEY_USER))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED);
+ if (!props.containsKey(TSDBDriver.PROPERTY_KEY_PASSWORD))
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);
+
+ String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), StandardCharsets.UTF_8.displayName());
+ String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
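
The RESTful driver now enforces the same credential checks as the JNI driver and derives the encoding name from StandardCharsets instead of a "UTF-8" literal. A sketch of the login-URL construction; the taos?data password from AuthenticationTest shows why the encoding matters ('?' becomes %3F):

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    static String buildLoginUrl(String host, int port, String user, String password)
            throws UnsupportedEncodingException {
        String u = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName());
        String p = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName());
        // buildLoginUrl("127.0.0.1", 6041, "root", "taos?data")
        //   -> http://127.0.0.1:6041/rest/login/root/taos%3Fdata
        return "http://" + host + ":" + port + "/rest/login/" + u + "/" + p;
    }
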
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index f8acd8f06180476a09519c0809dd493d062c911c..21c76f73b287e55ef14f5d70cf6a911a9cb543db 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -7,6 +7,7 @@ import com.taosdata.jdbc.AbstractStatement;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import com.taosdata.jdbc.TSDBErrorNumbers;
+import com.taosdata.jdbc.enums.TimestampFormat;
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
@@ -45,9 +46,7 @@ public class RestfulStatement extends AbstractStatement {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
- final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
-
- return executeOneUpdate(url, sql);
+ return executeOneUpdate(sql);
}
@Override
@@ -62,34 +61,25 @@ public class RestfulStatement extends AbstractStatement {
public boolean execute(String sql) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- if (!SqlSyntaxValidator.isValidForExecute(sql))
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE, "not a valid sql for execute: " + sql);
// if a "use" statement was executed, set this Statement's catalog to the new database
boolean result = true;
- String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
- if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
- }
- if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
- }
if (SqlSyntaxValidator.isUseSql(sql)) {
- HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
+ HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
result = false;
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
executeOneQuery(sql);
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
- executeOneUpdate(url, sql);
+ executeOneUpdate(sql);
result = false;
} else {
if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
- executeQuery(sql);
+ executeOneQuery(sql);
} else {
- executeUpdate(sql);
+ executeOneUpdate(sql);
result = false;
}
}
@@ -97,19 +87,32 @@ public class RestfulStatement extends AbstractStatement {
return result;
}
- private ResultSet executeOneQuery(String sql) throws SQLException {
- if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
+ private String getUrl() throws SQLException {
+ String dbname = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_DBNAME);
+ if (dbname == null || dbname.trim().isEmpty()) {
+ dbname = "";
+ } else {
+ dbname = "/" + dbname.toLowerCase();
+ }
+ TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase());
+ String url;
+
+ switch (timestampFormat) {
+ case TIMESTAMP:
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt" + dbname;
+ break;
+ case UTC:
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc" + dbname;
+ break;
+ default:
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql" + dbname;
+ }
+ return url;
+ }
+ private ResultSet executeOneQuery(String sql) throws SQLException {
// row data
- String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
- String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
- if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
- if ("UTC".equalsIgnoreCase(timestampFormat))
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
-
- String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
+ String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
@@ -119,11 +122,8 @@ public class RestfulStatement extends AbstractStatement {
return resultSet;
}
- private int executeOneUpdate(String url, String sql) throws SQLException {
- if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
-
- String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
+ private int executeOneUpdate(String sql) throws SQLException {
+ String result = HttpClientPoolUtil.execute(getUrl(), sql, this.conn.getToken());
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
@@ -134,7 +134,7 @@ public class RestfulStatement extends AbstractStatement {
}
private int getAffectedRows(JSONObject jsonObject) throws SQLException {
- // create ... SQLs should return 0 , and Restful result is this:
+ // create ... SQLs should return 0, and the RESTful result looks like this:
// {"status": "succ", "head": ["affected_rows"], "data": [[0]], "rows": 1}
JSONArray head = jsonObject.getJSONArray("head");
if (head.size() != 1 || !"affected_rows".equals(head.getString(0)))
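
The three endpoint variants and the optional database suffix are now computed in one place, getUrl(). A condensed sketch of the routing it implements; per the client options above, /rest/sqlt renders timestamps as epoch values and /rest/sqlutc as UTC strings:

    static String restUrl(String host, int port, String timestampFormat, String dbname) {
        String db = (dbname == null || dbname.trim().isEmpty()) ? "" : "/" + dbname.toLowerCase();
        switch (timestampFormat.trim().toUpperCase()) {
            case "TIMESTAMP": return "http://" + host + ":" + port + "/rest/sqlt" + db;
            case "UTC":       return "http://" + host + ":" + port + "/rest/sqlutc" + db;
            default:          return "http://" + host + ":" + port + "/rest/sql" + db;
        }
    }
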
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
index 0f99ff4f661ec48f1df4bba07cf50a410084b7df..cbd806b35a122b103443492e9770e6ee0804718d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java
@@ -16,8 +16,7 @@ package com.taosdata.jdbc.utils;
public class SqlSyntaxValidator {
- private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"};
- private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
+ private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"};
private static final String[] querySQL = {"select", "show", "describe"};
private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};
@@ -38,14 +37,6 @@ public class SqlSyntaxValidator {
return false;
}
- public static boolean isValidForExecute(String sql) {
- for (String prefix : SQL) {
- if (sql.trim().toLowerCase().startsWith(prefix))
- return true;
- }
- return false;
- }
-
public static boolean isDatabaseUnspecifiedQuery(String sql) {
for (String databaseObj : databaseUnspecifiedShow) {
if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*"))
@@ -63,9 +54,5 @@ public class SqlSyntaxValidator {
return sql.trim().toLowerCase().startsWith("use");
}
- public static boolean isSelectSql(String sql) {
- return sql.trim().toLowerCase().startsWith("select");
- }
-
}
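
With "reset" added to updateSQL and the unused catch-all isValidForExecute removed, statements are classified purely by prefix; ResetQueryCacheTest later in this diff relies on "reset query cache" taking the update path (execute returns false, updateCount is 0). The classification boils down to:

    static final String[] UPDATE_SQL = {"insert", "import", "create", "use", "alter", "drop", "set", "reset"};
    static final String[] QUERY_SQL  = {"select", "show", "describe"};

    static boolean startsWithAny(String sql, String[] prefixes) {
        String s = sql.trim().toLowerCase();
        for (String prefix : prefixes) {
            if (s.startsWith(prefix))
                return true;
        }
        return false;
    }
    // startsWithAny("reset query cache", UPDATE_SQL) -> true, so it is routed as an update
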
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
index 24c73fdd5c14af41039eb12a7713216166312a09..95307071e1c0388271e88796ff381ed40fb88a3f 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SubscribeTest.java
@@ -69,6 +69,8 @@ public class SubscribeTest {
@Before
public void createDatabase() throws SQLException {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
index 73853cf7a1ea9fbd118ac483daca50ab1deb1be7..3d76e1f98d4f8aa1d0ba3d68395e4036c5b069e6 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
@@ -297,7 +297,6 @@ public class TSDBPreparedStatementTest {
}
}
- /*
@Test
public void executeTest() throws SQLException {
Statement stmt = conn.createStatement();
@@ -586,8 +585,131 @@ public class TSDBPreparedStatementTest {
}
Assert.assertEquals(numOfRows, rows);
}
- */
+ @Test
+ public void bindDataQueryTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
+
+ int numOfRows = 1;
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
+ s.setTableName("w2");
+ s.setTagInt(0, 1);
+ s.setTagString(1, "test");
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add("test" + i % 4);
+ }
+ s.setString(1, s2, 10);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+
+ String sql = "select * from weather_test where t1 >= ? and t1 <= ?";
+ TSDBPreparedStatement s1 = (TSDBPreparedStatement) conn.prepareStatement(sql);
+ s1.setInt(1, 0);
+ s1.setInt(2, 10);
+
+ ResultSet rs = s1.executeQuery();
+ int rows = 0;
+ while (rs.next()) {
+ rows++;
+ }
+ Assert.assertEquals(numOfRows, rows);
+ }
+
+ @Test
+ public void setTagNullTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(10), t9 nchar(10))");
+
+ int numOfRows = 1;
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?,?,?,?,?,?,?,?) values(?, ?)");
+ s.setTableName("w3");
+ s.setTagNull(0, TSDBConstants.TSDB_DATA_TYPE_TINYINT);
+ s.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
+ s.setTagNull(2, TSDBConstants.TSDB_DATA_TYPE_INT);
+ s.setTagNull(3, TSDBConstants.TSDB_DATA_TYPE_BIGINT);
+ s.setTagNull(4, TSDBConstants.TSDB_DATA_TYPE_FLOAT);
+ s.setTagNull(5, TSDBConstants.TSDB_DATA_TYPE_DOUBLE);
+ s.setTagNull(6, TSDBConstants.TSDB_DATA_TYPE_BOOL);
+ s.setTagNull(7, TSDBConstants.TSDB_DATA_TYPE_BINARY);
+ s.setTagNull(8, TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add(i);
+ }
+ s.setInt(1, s2);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+ }
+
+ private String stringGenerator(int length) {
+ String source = "abcdefghijklmnopqrstuvwxyz";
+ StringBuilder sb = new StringBuilder();
+ Random rand = new Random();
+ for (int i = 0; i < length; i++) {
+ sb.append(source.charAt(rand.nextInt(26)));
+ }
+ return sb.toString();
+ }
+
+ @Test(expected = SQLException.class)
+ public void setMaxTableNameTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 int)");
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?)");
+ String tbname = stringGenerator(193);
+ s.setTableName(tbname);
+ s.setTagInt(0, 1);
+
+ int numOfRows = 1;
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add(i);
+ }
+ s.setInt(1, s2);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+ }
+
@Test(expected = SQLException.class)
public void createTwoSameDbTest() throws SQLException {
// when
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
index 6702de9bdbf566eb1ecaea322d0338a64ffcd40c..0ea46dade29316b99447a6ea4e372bc8057670e8 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java
@@ -1,6 +1,9 @@
package com.taosdata.jdbc.cases;
+import com.taosdata.jdbc.TSDBErrorNumbers;
+import org.junit.Assert;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import java.sql.*;
@@ -12,6 +15,47 @@ public class AuthenticationTest {
private static final String password = "taos?data";
private Connection conn;
+ @Test
+ public void connectWithoutUserByJni() {
+ try {
+ DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?");
+ } catch (SQLException e) {
+ Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
+ Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
+ }
+ }
+
+ @Test
+ public void connectWithoutUserByRestful() {
+ try {
+ DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?");
+ } catch (SQLException e) {
+ Assert.assertEquals(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, e.getErrorCode());
+ Assert.assertEquals("ERROR (2319): user is required", e.getMessage());
+ }
+ }
+
+ @Test
+ public void connectWithoutPasswordByJni() {
+ try {
+ DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root");
+ } catch (SQLException e) {
+ Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
+ Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
+ }
+ }
+
+ @Test
+ public void connectWithoutPasswordByRestful() {
+ try {
+ DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root");
+ } catch (SQLException e) {
+ Assert.assertEquals(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, e.getErrorCode());
+ Assert.assertEquals("ERROR (231a): password is required", e.getMessage());
+ }
+ }
+
+ @Ignore
@Test
public void test() {
// change password
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
index e175d6d1141e125d58f2a1e4a4f64c3d1b22bfbb..f2b102cfe7ceb0ec4b8af74c76dc05317948dc98 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java
@@ -29,6 +29,8 @@ public class BatchInsertTest {
public void before() {
try {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
index bc11c7f34eeb719574a35beaf186cf637df2826f..1297a6b4c4eb0eca208950363c13e9bb4d1cd3a9 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java
@@ -21,6 +21,8 @@ public class ImportTest {
public static void before() {
try {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
index 9f8243542f0c2cf760ca192a0d39293531a5e42c..ac254bebf39f55b358883716e23ba72b695703f7 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java
@@ -270,6 +270,41 @@ public class InsertSpecialCharacterJniTest {
}
}
+ @Ignore
+ @Test
+ public void testSingleQuotaEscape() throws SQLException {
+ final long now = System.currentTimeMillis();
+ final String sql = "insert into t? using ? tags(?) values(?, ?, ?) t? using " + tbname2 + " tags(?) values(?,?,?) ";
+ try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
+ // t1
+ pstmt.setInt(1, 1);
+ pstmt.setString(2, tbname2);
+ pstmt.setString(3, special_character_str_5);
+ pstmt.setTimestamp(4, new Timestamp(now));
+ pstmt.setBytes(5, special_character_str_5.getBytes());
+ // t2
+ pstmt.setInt(7, 2);
+ pstmt.setString(8, special_character_str_5);
+ pstmt.setTimestamp(9, new Timestamp(now));
+ pstmt.setString(11, special_character_str_5);
+
+ int ret = pstmt.executeUpdate();
+ Assert.assertEquals(2, ret);
+ }
+
+ String query = "select * from ?.t? where ? < ? and ts >= ? and f1 is not null";
+ try (PreparedStatement pstmt = conn.prepareStatement(query)) {
+ pstmt.setString(1, dbName);
+ pstmt.setInt(2, 1);
+ pstmt.setString(3, "ts");
+ pstmt.setTimestamp(4, new Timestamp(System.currentTimeMillis()));
+ pstmt.setTimestamp(5, new Timestamp(0));
+
+ ResultSet rs = pstmt.executeQuery();
+ Assert.assertNotNull(rs);
+ }
+ }
+
@Test
public void testCase10() throws SQLException {
final long now = System.currentTimeMillis();
@@ -293,13 +328,12 @@ public class InsertSpecialCharacterJniTest {
Assert.assertEquals(2, ret);
}
//query t1
- String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
- pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
@@ -311,12 +345,11 @@ public class InsertSpecialCharacterJniTest {
Assert.assertNull(f2);
}
// query t2
- query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
- pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
index 2e981e7f414590c8d8be46659a415cb244a949ae..eedccec6f1ad3aecbaebbd525788a68e7c236511 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java
@@ -293,13 +293,12 @@ public class InsertSpecialCharacterRestfulTest {
Assert.assertEquals(2, ret);
}
//query t1
- String query = "select * from ?.t? where ts < ? and ts >= ? and ? is not null";
+ String query = "select * from ?.t? where ts < ? and ts >= ? and f1 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setString(1, dbName);
pstmt.setInt(2, 1);
pstmt.setTimestamp(3, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(4, new Timestamp(0));
- pstmt.setString(5, "f1");
ResultSet rs = pstmt.executeQuery();
rs.next();
@@ -311,12 +310,11 @@ public class InsertSpecialCharacterRestfulTest {
Assert.assertNull(f2);
}
// query t2
- query = "select * from t? where ts < ? and ts >= ? and ? is not null";
+ query = "select * from t? where ts < ? and ts >= ? and f2 is not null";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
pstmt.setInt(1, 2);
pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
pstmt.setTimestamp(3, new Timestamp(0));
- pstmt.setString(4, "f2");
ResultSet rs = pstmt.executeQuery();
rs.next();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..18a2c32aca0535567dd42e886bc87ae618596a40
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java
@@ -0,0 +1,101 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class MultiConnectionWithDifferentDbTest {
+
+ private static String host = "127.0.0.1";
+ private static String db1 = "db1";
+ private static String db2 = "db2";
+
+ private long ts;
+
+ @Test
+ public void test() {
+ List<Thread> threads = IntStream.range(1, 3).mapToObj(i -> new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < 10; j++) {
+ queryDb();
+ try {
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private void queryDb() {
+ String url = "jdbc:TAOS-RS://" + host + ":6041/db" + i + "?user=root&password=taosdata";
+ try (Connection connection = DriverManager.getConnection(url)) {
+ Statement stmt = connection.createStatement();
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ assertNotNull(rs);
+ rs.next();
+ long actual = rs.getTimestamp("ts").getTime();
+ assertEquals(ts, actual);
+
+ int f1 = rs.getInt("f1");
+ assertEquals(i, f1);
+
+ String loc = i == 1 ? "beijing" : "shanghai";
+ String loc_actual = rs.getString("loc");
+ assertEquals(loc, loc_actual);
+
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ }, "thread-" + i)).collect(Collectors.toList());
+
+ threads.forEach(Thread::start);
+
+ for (Thread t : threads) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ }
+
+ @Before
+ public void before() {
+ ts = System.currentTimeMillis();
+
+ try {
+ Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + db1);
+ stmt.execute("create database if not exists " + db1);
+ stmt.execute("use " + db1);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('beijing') values(" + ts + ", 1)");
+
+ stmt.execute("drop database if exists " + db2);
+ stmt.execute("create database if not exists " + db2);
+ stmt.execute("use " + db2);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('shanghai') values(" + ts + ", 2)");
+
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
index 535e56f7d7735a7cbd209fbb2a2fddd492021e15..b4449491a93c2ffc857448b8697e771eabd0f97a 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java
@@ -22,6 +22,8 @@ public class QueryDataTest {
public void createDatabase() {
try {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@@ -34,9 +36,8 @@ public class QueryDataTest {
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
-
} catch (SQLException e) {
- return;
+ e.printStackTrace();
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java
index 4eebbd08e87b2e85ce319bc0bc98bc4515bd2077..48753d62f038002eae386ec31ce62b34bf1ef179 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java
@@ -1,51 +1,49 @@
package com.taosdata.jdbc.cases;
-import com.taosdata.jdbc.TSDBDriver;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
-import java.sql.*;
-import java.util.Properties;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
public class ResetQueryCacheTest {
- static Connection connection;
- static Statement statement;
- static String host = "127.0.0.1";
-
- @Before
- public void init() {
- try {
- Properties properties = new Properties();
- properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
- properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
- connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
- statement = connection.createStatement();
- } catch (SQLException e) {
- return;
- }
+ @Test
+ public void jni() throws SQLException {
+ // given
+ Connection connection = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
+ Statement statement = connection.createStatement();
+
+ // when
+ boolean execute = statement.execute("reset query cache");
+
+ // then
+ assertFalse(execute);
+ assertEquals(0, statement.getUpdateCount());
+
+ statement.close();
+ connection.close();
}
@Test
- public void testResetQueryCache() throws SQLException {
- String resetSql = "reset query cache";
- statement.execute(resetSql);
- }
+ public void restful() throws SQLException {
+ // given
+ Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata&timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8");
+ Statement statement = connection.createStatement();
+
+ // when
+ boolean execute = statement.execute("reset query cache");
+
+ // then
+ assertFalse(execute);
+ assertEquals(0, statement.getUpdateCount());
- @After
- public void close() {
- try {
- if (statement != null)
- statement.close();
- if (connection != null)
- connection.close();
- } catch (SQLException e) {
- e.printStackTrace();
- }
+ statement.close();
+ connection.close();
}
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
index 0022ceaf2123ac03192f761ef068ecf5ad333e6d..b51c0309be809250839a5241d12296182067dfd3 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java
@@ -20,6 +20,8 @@ public class SelectTest {
public void createDatabaseAndTable() {
try {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
index 1600fec13d1f2a56caf3905b863aa132fe1de830..8e10743e5e298f63e57007bcdd44925bf43f8187 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java
@@ -24,6 +24,8 @@ public class StableTest {
public static void createDatabase() {
try {
Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..94a175ad5c7fd50fa35d6b45ea59ab26ffc02ce1
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java
@@ -0,0 +1,71 @@
+package com.taosdata.jdbc.cases;
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.Test;
+
+import java.sql.*;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.util.Properties;
+
+public class TimeZoneTest {
+
+ private String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
+
+ @Test
+ public void javaTimeZone() {
+ LocalDateTime localDateTime = LocalDateTime.of(1970, 1, 1, 0, 0, 0);
+
+ Instant instant = localDateTime.atZone(ZoneId.of("UTC-8")).toInstant();
+ System.out.println("UTC-8: " + instant.getEpochSecond() + "," + instant);
+
+ instant = localDateTime.atZone(ZoneId.of("UT")).toInstant();
+ System.out.println("UTC: " + instant.getEpochSecond() + "," + instant);
+
+ instant = localDateTime.atZone(ZoneId.of("UTC+8")).toInstant();
+ System.out.println("UTC+8: " + instant.getEpochSecond() + "," + instant);
+ }
+
+ @Test
+ public void taosTimeZone() {
+ // given
+ Properties props = new Properties();
+ props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+ // when and then
+ try (Connection connection = DriverManager.getConnection(url, props)) {
+ Statement stmt = connection.createStatement();
+
+ stmt.execute("drop database if exists timezone_test");
+ stmt.execute("create database if not exists timezone_test keep 365000");
+ stmt.execute("use timezone_test");
+ stmt.execute("create table weather(ts timestamp, temperature float)");
+
+ stmt.execute("insert into timezone_test.weather(ts, temperature) values('1970-01-01 00:00:00', 1.0)");
+
+ ResultSet rs = stmt.executeQuery("select * from timezone_test.weather");
+ while (rs.next()) {
+ Timestamp ts = rs.getTimestamp("ts");
+ System.out.println("ts: " + ts.getTime() + "," + ts);
+ }
+
+ stmt.execute("insert into timezone_test.weather(ts, temperature, humidity) values('1970-01-02 00:00:00', 1.0, 2.0)");
+
+ rs = stmt.executeQuery("select * from timezone_test.weather");
+ while (rs.next()) {
+ Timestamp ts = rs.getTimestamp("ts");
+ System.out.println("ts: " + ts.getTime() + "," + ts);
+ }
+
+ stmt.execute("drop database if exists timezone_test");
+
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
\ No newline at end of file
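
For reference, the numbers javaTimeZone() prints follow directly from the offsets: local midnight of 1970-01-01 falls 8 hours after the epoch in UTC-8 and 8 hours before it in UTC+8.

    LocalDateTime epochLocal = LocalDateTime.of(1970, 1, 1, 0, 0, 0);
    epochLocal.atZone(ZoneId.of("UTC-8")).toInstant().getEpochSecond(); //  28800 (1970-01-01T08:00:00Z)
    epochLocal.atZone(ZoneId.of("UT")).toInstant().getEpochSecond();    //      0 (the epoch itself)
    epochLocal.atZone(ZoneId.of("UTC+8")).toInstant().getEpochSecond(); // -28800 (1969-12-31T16:00:00Z)
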
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..9fe51e7203fac7133783e47fd5b0cc07f33b2494
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java
@@ -0,0 +1,69 @@
+package com.taosdata.jdbc.rs;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+
+import static org.junit.Assert.*;
+
+public class DatabaseSpecifiedTest {
+
+ private static String host = "127.0.0.1";
+ private static String dbname = "test_db_spec";
+
+ private Connection connection;
+ private long ts;
+
+ @Test
+ public void test() throws SQLException {
+ // when
+ connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/" + dbname + "?user=root&password=taosdata");
+ try (Statement stmt = connection.createStatement();) {
+ ResultSet rs = stmt.executeQuery("select * from weather");
+
+ //then
+ assertNotNull(rs);
+ rs.next();
+ long now = rs.getTimestamp("ts").getTime();
+ assertEquals(ts, now);
+ int f1 = rs.getInt(2);
+ assertEquals(1, f1);
+ String loc = rs.getString("loc");
+ assertEquals("beijing", loc);
+ }
+ connection.close();
+ }
+
+ @Before
+ public void before() {
+ ts = System.currentTimeMillis();
+ try {
+ Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+ Statement stmt = connection.createStatement();
+
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('beijing') values( " + ts + ", 1)");
+
+ stmt.close();
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try {
+ if (connection != null)
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
index abd60f5b63d46b406f19b6be9dcbbab6b786de12..1c5c03aacb5e7ed5683c75414975224a67d49e21 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
@@ -9,6 +9,8 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
+import static org.junit.Assert.assertEquals;
+
public class RestfulConnectionTest {
private static final String host = "127.0.0.1";
@@ -26,7 +28,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
} catch (SQLException e) {
e.printStackTrace();
}
@@ -38,7 +40,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -49,7 +51,7 @@ public class RestfulConnectionTest {
@Test
public void nativeSQL() throws SQLException {
String nativeSQL = conn.nativeSQL("select * from log.log");
- Assert.assertEquals("select * from log.log", nativeSQL);
+ assertEquals("select * from log.log", nativeSQL);
}
@Test
@@ -87,7 +89,7 @@ public class RestfulConnectionTest {
public void getMetaData() throws SQLException {
DatabaseMetaData meta = conn.getMetaData();
Assert.assertNotNull(meta);
- Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
+ assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
}
@Test
@@ -103,25 +105,25 @@ public class RestfulConnectionTest {
@Test
public void setCatalog() throws SQLException {
conn.setCatalog("test");
- Assert.assertEquals("test", conn.getCatalog());
+ assertEquals("test", conn.getCatalog());
}
@Test
public void getCatalog() throws SQLException {
conn.setCatalog("log");
- Assert.assertEquals("log", conn.getCatalog());
+ assertEquals("log", conn.getCatalog());
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void setTransactionIsolation() throws SQLException {
conn.setTransactionIsolation(Connection.TRANSACTION_NONE);
- Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+ assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
}
@Test
public void getTransactionIsolation() throws SQLException {
- Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+ assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
}
@Test
@@ -140,7 +142,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
}
@@ -152,7 +154,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
}
@@ -175,13 +177,13 @@ public class RestfulConnectionTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void setHoldability() throws SQLException {
conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
- Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+ assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Test
public void getHoldability() throws SQLException {
- Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+ assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -210,7 +212,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
@@ -222,7 +224,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
@@ -299,11 +301,11 @@ public class RestfulConnectionTest {
Properties info = conn.getClientInfo();
String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET);
- Assert.assertEquals("UTF-8", charset);
+ assertEquals("UTF-8", charset);
String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE);
- Assert.assertEquals("en_US.UTF-8", locale);
+ assertEquals("en_US.UTF-8", locale);
String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
- Assert.assertEquals("UTC-8", timezone);
+ assertEquals("UTC-8", timezone);
}
@Test
@@ -313,11 +315,11 @@ public class RestfulConnectionTest {
conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET);
- Assert.assertEquals("UTF-8", charset);
+ assertEquals("UTF-8", charset);
String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE);
- Assert.assertEquals("en_US.UTF-8", locale);
+ assertEquals("en_US.UTF-8", locale);
String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
- Assert.assertEquals("UTC-8", timezone);
+ assertEquals("UTC-8", timezone);
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -345,14 +347,15 @@ public class RestfulConnectionTest {
conn.abort(null);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void setNetworkTimeout() throws SQLException {
conn.setNetworkTimeout(null, 1000);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void getNetworkTimeout() throws SQLException {
- conn.getNetworkTimeout();
+ int timeout = conn.getNetworkTimeout();
+ assertEquals(0, timeout);
}
@Test
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java
deleted file mode 100644
index ccb0941da00e2755eaf8fde2553b8a8e6b33cd25..0000000000000000000000000000000000000000
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.taosdata.jdbc.utils;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class SqlSyntaxValidatorTest {
-
- @Test
- public void isSelectSQL() {
- Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather"));
- Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather"));
- Assert.assertTrue(SqlSyntaxValidator.isSelectSql(" select * from test.weather "));
- Assert.assertFalse(SqlSyntaxValidator.isSelectSql("insert into test.weather values(now, 1.1, 2)"));
- }
-
- @Test
- public void isUseSQL() {
- Assert.assertTrue(SqlSyntaxValidator.isUseSql("use database test"));
- }
-
-}
\ No newline at end of file
diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js
index f3961e3787c4fb6d7da7092b68632d08a8b57e20..5ba2739c35b1f0aef61ba3e52ae5d2f3a901df77 100644
--- a/src/connector/nodejs/nodetaos/cinterface.js
+++ b/src/connector/nodejs/nodetaos/cinterface.js
@@ -15,36 +15,18 @@ const { NULL_POINTER } = require('ref-napi');
module.exports = CTaosInterface;
-function convertMillisecondsToDatetime(time) {
- return new TaosObjects.TaosTimestamp(time);
-}
-function convertMicrosecondsToDatetime(time) {
- return new TaosObjects.TaosTimestamp(time * 0.001, true);
-}
-
-function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
- timestampConverter = convertMillisecondsToDatetime;
- if (micro == true) {
- timestampConverter = convertMicrosecondsToDatetime;
- }
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
while (currOffset < data.length) {
- let queue = [];
- let time = 0;
- for (let i = currOffset; i < currOffset + nbytes; i++) {
- queue.push(data[i]);
- }
- for (let i = queue.length - 1; i >= 0; i--) {
- time += queue[i] * Math.pow(16, i * 2);
- }
+ let time = data.readInt64LE(currOffset);
currOffset += nbytes;
- res.push(timestampConverter(time));
+ res.push(new TaosObjects.TaosTimestamp(time, precision));
}
return res;
}
-function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = new Array(data.length);
for (let i = 0; i < data.length; i++) {
@@ -60,7 +42,7 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
}
return res;
}
-function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -71,7 +53,7 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false
}
return res;
}
-function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -82,7 +64,7 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = fals
}
return res;
}
-function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -93,7 +75,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
}
return res;
}
-function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -104,7 +86,7 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -115,7 +97,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
@@ -126,28 +108,40 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false)
}
return res;
}
-function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+
+function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
+
let currOffset = 0;
while (currOffset < data.length) {
- let dataEntry = data.slice(currOffset, currOffset + nbytes);
- if (dataEntry[0] == FieldTypes.C_BINARY_NULL) {
- res.push(null);
- }
- else {
- res.push(ref.readCString(dataEntry));
+ let len = data.readIntLE(currOffset, 2);
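+ // each cell begins with a 2-byte little-endian length prefix; the string payload follows it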
+ let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
+ if (dataEntry[0] == 255) {
+ res.push(null)
+ } else {
+ res.push(dataEntry.toString("utf-8"));
}
currOffset += nbytes;
}
return res;
}
-function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) {
+
+function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
- let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
- //TODO: should use the correct character encoding
- res.push(dataEntry.toString("utf-8"));
+
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let len = data.readIntLE(currOffset, 2);
+ let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
+ if (dataEntry[0] == 255 && dataEntry[1] == 255) {
+ res.push(null)
+ } else {
+ res.push(dataEntry.toString("utf-8"));
+ }
+ currOffset += nbytes;
+ }
return res;
}
@@ -282,7 +276,7 @@ CTaosInterface.prototype.config = function config() {
CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
let _host, _user, _password, _db, _port;
try {
- _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ _host = host != null ? ref.allocCString(host) : ref.NULL;
}
catch (err) {
throw "Attribute Error: host is expected as a str";
@@ -300,7 +294,7 @@ CTaosInterface.prototype.connect = function connect(host = null, user = "root",
throw "Attribute Error: password is expected as a str";
}
try {
- _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ _db = db != null ? ref.allocCString(db) : ref.NULL;
}
catch (err) {
throw "Attribute Error: db is expected as a str";
@@ -355,8 +349,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
}
var fieldL = this.libtaos.taos_fetch_lengths(result);
-
- let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = this.libtaos.taos_result_precision(result);
var fieldlens = [];
@@ -383,7 +376,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
- blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision);
}
}
return { blocks: blocks, num_of_rows }
@@ -433,7 +426,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
let row = cti.libtaos.taos_fetch_row(result2);
let fields = cti.fetchFields_a(result2);
- let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = cti.libtaos.taos_result_precision(result2);
let blocks = new Array(fields.length);
blocks.fill(null);
numOfRows2 = Math.abs(numOfRows2);
@@ -459,7 +452,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
let prow = ref.reinterpret(row, 8, i * 8);
prow = prow.readPointer();
prow = ref.ref(prow);
- blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision);
//offset += fields[i]['bytes'] * numOfRows2;
}
}
@@ -582,7 +575,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
var cti = this;
let asyncCallbackWrapper = function (param2, result2, row) {
let fields = cti.fetchFields_a(result2);
- let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
+ let precision = cti.libtaos.taos_result_precision(result2);
let blocks = new Array(fields.length);
blocks.fill(null);
let numOfRows2 = 1;
@@ -592,7 +585,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb
if (!convertFunctions[fields[i]['type']]) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
- blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
+ blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision);
offset += fields[i]['bytes'] * numOfRows2;
}
}
diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js
index 809d17a016ac5aafc504c71f6417858e9d00821b..3bc0fe0aca060a32daa7a5cebd2dbfb99ac29a7c 100644
--- a/src/connector/nodejs/nodetaos/taosobjects.js
+++ b/src/connector/nodejs/nodetaos/taosobjects.js
@@ -1,5 +1,5 @@
const FieldTypes = require('./constants');
-
+const util = require('util');
/**
* Various objects such as TaosRow and TaosColumn that help make parsing data easier
* @module TaosObjects
@@ -14,7 +14,7 @@ const FieldTypes = require('./constants');
* var trow = new TaosRow(row);
* console.log(trow.data);
*/
-function TaosRow (row) {
+function TaosRow(row) {
this.data = row;
this.length = row.length;
return this;
@@ -29,10 +29,10 @@ function TaosRow (row) {
*/
function TaosField(field) {
- this._field = field;
- this.name = field.name;
- this.type = FieldTypes.getType(field.type);
- return this;
+ this._field = field;
+ this.name = field.name;
+ this.type = FieldTypes.getType(field.type);
+ return this;
}
/**
@@ -42,39 +42,111 @@ function TaosField(field) {
* @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000
*/
class TaosTimestamp extends Date {
- constructor(date, micro = false) {
- super(date);
- this._type = 'TaosTimestamp';
- if (micro) {
- this.microTime = date - Math.floor(date);
+ constructor(date, precision = 0) {
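+ // precision codes: 0 = milliseconds, 1 = microseconds, 2 = nanoseconds (as passed in from taos_result_precision)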
+ if (precision === 1) {
+ super(Math.floor(date / 1000));
+ this.precisionExtras = date % 1000;
+ } else if (precision === 2) {
+ // use BigInt to avoid float rounding: 1623254400999999999 / 1000000 gives 1623254401000, not the expected 1623254400999
+ super(parseInt(BigInt(date) / 1000000n));
+ // use BigInt to avoid float rounding: 1625801548423914405 % 1000000 gives 914496 instead of the expected 914405
+ this.precisionExtras = parseInt(BigInt(date) % 1000000n);
+ } else {
+ super(parseInt(date));
+ }
+ this.precision = precision;
+ }
+
+ /**
+ * TDengine raw timestamp.
+ * @returns raw taos timestamp (int64)
+ */
+ taosTimestamp() {
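+ // arithmetic on a Date coerces it to its millisecond value via valueOf()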
+ if (this.precision == 1) {
+ return (this * 1000 + this.precisionExtras);
+ } else if (this.precision == 2) {
+ return (this * 1000000 + this.precisionExtras);
+ } else {
+ return Math.floor(this);
+ }
+ }
+
+ /**
+ * Gets the microseconds of a Date.
+ * @return {Int} A microseconds integer
+ */
+ getMicroseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras / 1000;
+ } else {
+ return 0;
+ }
+ }
+ /**
+ * Gets the nanoseconds of a TaosTimestamp.
+ * @return {Int} A nanoseconds integer
+ */
+ getNanoseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras;
+ } else {
+ return 0;
+ }
+ }
+
+ /**
+ * @returns {String} a string for timestamp string format
+ */
+ _precisionExtra() {
+ if (this.precision == 1) {
+ return String(this.precisionExtras).padStart(3, '0');
+ } else if (this.precision == 2) {
+ return String(this.precisionExtras).padStart(6, '0');
+ } else {
+ return '';
}
}
/**
* @function Returns the date into a string usable by TDengine
* @return {string} A Taos Timestamp String
*/
- toTaosString(){
+ toTaosString() {
var tzo = -this.getTimezoneOffset(),
- dif = tzo >= 0 ? '+' : '-',
- pad = function(num) {
- var norm = Math.floor(Math.abs(num));
- return (norm < 10 ? '0' : '') + norm;
- },
- pad2 = function(num) {
- var norm = Math.floor(Math.abs(num));
- if (norm < 10) return '00' + norm;
- if (norm < 100) return '0' + norm;
- if (norm < 1000) return norm;
- };
+ dif = tzo >= 0 ? '+' : '-',
+ pad = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ return (norm < 10 ? '0' : '') + norm;
+ },
+ pad2 = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ if (norm < 10) return '00' + norm;
+ if (norm < 100) return '0' + norm;
+ if (norm < 1000) return norm;
+ };
return this.getFullYear() +
- '-' + pad(this.getMonth() + 1) +
- '-' + pad(this.getDate()) +
- ' ' + pad(this.getHours()) +
- ':' + pad(this.getMinutes()) +
- ':' + pad(this.getSeconds()) +
- '.' + pad2(this.getMilliseconds()) +
- '' + (this.microTime ? pad2(Math.round(this.microTime * 1000)) : '');
+ '-' + pad(this.getMonth() + 1) +
+ '-' + pad(this.getDate()) +
+ ' ' + pad(this.getHours()) +
+ ':' + pad(this.getMinutes()) +
+ ':' + pad(this.getSeconds()) +
+ '.' + pad2(this.getMilliseconds()) +
+ '' + this._precisionExtra();
+ }
+
+ /**
+ * Custom console.log
+ * @returns {String} string format for debug
+ */
+ [util.inspect.custom](depth, opts) {
+ return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts);
+ }
+ toString() {
+ return this.toTaosString();
}
}
-module.exports = {TaosRow, TaosField, TaosTimestamp}
+module.exports = { TaosRow, TaosField, TaosTimestamp }
diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json
index d21b62108b14e5a132ad5457d190bbcbc58b73a8..6a2c66100b3d1921b3ce8997e70d33f024e5c3f2 100644
--- a/src/connector/nodejs/package.json
+++ b/src/connector/nodejs/package.json
@@ -1,13 +1,13 @@
{
"name": "td2.0-connector",
- "version": "2.0.7",
+ "version": "2.0.10",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {
"test": "test"
},
"scripts": {
- "test": "node test/test.js"
+ "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js"
},
"repository": {
"type": "git",
diff --git a/src/connector/nodejs/tdengine.js b/src/connector/nodejs/tdengine.js
index aa296279d5e20f3d049d478ea2af44ea47a2b8e3..047c744a4fc90c6306e851eaa529a7f9f578fe12 100644
--- a/src/connector/nodejs/tdengine.js
+++ b/src/connector/nodejs/tdengine.js
@@ -1,4 +1,4 @@
var TDengineConnection = require('./nodetaos/connection.js')
-module.exports.connect = function (connection=null) {
+module.exports.connect = function (connection={}) {
return new TDengineConnection(connection);
}
diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js
index bf4bb2c54188d3eb0f9c7fb5306912effc7b0760..caf05955da4c960ebedc872f400c17d18be767dd 100644
--- a/src/connector/nodejs/test/test.js
+++ b/src/connector/nodejs/test/test.js
@@ -1,5 +1,5 @@
const taos = require('../tdengine');
-var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
+var conn = taos.connect();
var c1 = conn.cursor();
let stime = new Date();
let interval = 1000;
diff --git a/src/connector/nodejs/test/testMicroseconds.js b/src/connector/nodejs/test/testMicroseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f
--- /dev/null
+++ b/src/connector/nodejs/test/testMicroseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) + l;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_us';
+c1.execute('create database if not exists ' + dbname + ' precision "us"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914) {
+ throw "microseconds not match!";
+}
+if (ts.getMicroseconds() % 1000 !== 914) {
+ throw "micronsecond precision error";
+}
+setTimeout(function () {
+ c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/src/connector/nodejs/test/testNanoseconds.js b/src/connector/nodejs/test/testNanoseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea
--- /dev/null
+++ b/src/connector/nodejs/test/testNanoseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) + l;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_ns';
+c1.execute('create database if not exists ' + dbname + ' precision "ns"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914405, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914405) {
+ throw "nanosecond not match!";
+}
+if (ts.getNanoseconds() % 1000000 !== 914405) {
+ throw "nanosecond precision error";
+}
+setTimeout(function () {
+ c1.query('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js
new file mode 100644
index 0000000000000000000000000000000000000000..68fad89c22894ec358d55e9c03746fbd86ce0c99
--- /dev/null
+++ b/src/connector/nodejs/test/testnchar.js
@@ -0,0 +1,33 @@
+const taos = require('../tdengine');
+var conn = taos.connect({ host: "localhost" });
+var c1 = conn.cursor();
+
+
+function checkData(data, row, col, expect) {
+ let checkdata = data[row][col];
+ if (checkdata == expect) {
+ // console.log('check pass')
+ }
+ else {
+ console.log('check failed, expect ' + expect + ', but is ' + checkdata)
+ }
+}
+
+c1.execute('drop database if exists testnodejsnchar')
+c1.execute('create database testnodejsnchar')
+c1.execute('use testnodejsnchar');
+c1.execute('create table tb (ts timestamp, value float, text binary(200))')
+c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") -
+c1.execute('insert into tb values(1623254400150, 24.7, NULL);')
+c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");')
+sql = 'select * from tb;'
+
+console.log('*******************************************')
+
+c1.execute(sql);
+data = c1.fetchall();
+console.log(data)
+//check data about insert data
+checkData(data, 0, 2, '中文10000000000000000000000')
+checkData(data, 1, 2, null)
+checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000')
\ No newline at end of file
diff --git a/src/connector/odbc/examples/c/main.c b/src/connector/odbc/examples/c/main.c
index e36c75688e3440a62b66fa5fc2f8b13b83f55237..de01d2b85eda0a6851ceede8b1932ce6d9231595 100644
--- a/src/connector/odbc/examples/c/main.c
+++ b/src/connector/odbc/examples/c/main.c
@@ -18,8 +18,8 @@
#define CHK_TEST(statement) \
do { \
D("testing: %s", #statement); \
- int r = (statement); \
- if (r) { \
+ int _r = (statement); \
+ if (_r) { \
D("testing failed: %s", #statement); \
return 1; \
} \
@@ -181,7 +181,7 @@ static int do_statement(SQLHSTMT stmt, const char *statement) {
r = traverse_cols(stmt, cols);
char buf[4096];
while (1) {
- SQLRETURN r = SQLFetch(stmt);
+ r = SQLFetch(stmt);
if (r==SQL_NO_DATA) break;
CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "");
     for (size_t i=0; i<cols; ++i) {
diff --git a/src/connector/python/README.md b/src/connector/python/README.md
--- a/src/connector/python/README.md
+++ b/src/connector/python/README.md
+def query_callback(p_param, p_result, code):
+ # type: (c_void_p, c_void_p, c_int) -> None
+ if p_result == None:
+ return
+ result = TaosResult(p_result)
+ if code == 0:
+ result.fetch_rows_a(fetch_callback, p_param)
+ result.check_error(code)
+
+
+class Counter(Structure):
+ _fields_ = [("count", c_int), ("done", c_bool)]
+
+ def __str__(self):
+ return "{ count: %d, done: %s }" % (self.count, self.done)
+
+
+def test_query(conn):
+ # type: (TaosConnection) -> None
+ counter = Counter(count=0)
+ conn.query_a("select * from log.log", query_callback, byref(counter))
+
+ while not counter.done:
+ print("wait query callback")
+ time.sleep(1)
+ print(counter)
+ conn.close()
+
+
+if __name__ == "__main__":
+ test_query(connect())
+```
+
+### Statement API - Bind row after row
+
+```python
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.exec(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_bind_params(16)
+params[0].timestamp(1626861392589)
+params[1].bool(True)
+params[2].null()
+params[3].tinyint(2)
+params[4].smallint(3)
+params[5].int(4)
+params[6].bigint(5)
+params[7].tinyint_unsigned(6)
+params[8].smallint_unsigned(7)
+params[9].int_unsigned(8)
+params[10].bigint_unsigned(9)
+params[11].float(10.1)
+params[12].double(10.11)
+params[13].binary("hello")
+params[14].nchar("stmt")
+params[15].timestamp(1626861392589)
+stmt.bind_param(params)
+
+params[0].timestamp(1626861392590)
+params[15].null()
+stmt.bind_param(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 2
+result.close()
+
+result = conn.query("select * from log")
+
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
+
+```
+
+### Statement API - Bind multiple rows
+
+```python
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.exec(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[15].timestamp([None, None, 1626861392591])
+stmt.bind_param_batch(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 3
+result.close()
+
+result = conn.query("select * from log")
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
+```
+
+### Subscription API - Subscribe
+
+```python
+import taos
+from random import random
+
+conn = taos.connect()
+dbname = "pytest_taos_subscribe_callback"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+conn.exec("create table if not exists log(ts timestamp, n int)")
+for i in range(10):
+ conn.exec("insert into log values(now, %d)" % i)
+
+sub = conn.subscribe(True, "test", "select * from log", 1000)
+print("# consume from begin")
+for ts, n in sub.consume():
+ print(ts, n)
+
+print("# consume new data")
+for i in range(5):
+ conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
+ result = sub.consume()
+ for ts, n in result:
+ print(ts, n)
+
+print("# consume with a stop condition")
+for i in range(10):
+ conn.exec("insert into log values(now, %d)" % int(random() * 10))
+ result = sub.consume()
+ try:
+ ts, n = next(result)
+ print(ts, n)
+ if n > 5:
+ result.stop_query()
+ print("## stopped")
+ break
+ except StopIteration:
+ continue
+
+sub.close()
+
+conn.exec("drop database if exists %s" % dbname)
+conn.close()
+```
+
+### Subscription API - Subscribe asynchronously with callback
+
+```python
+from taos import *
+from ctypes import *
+
+import time
+
+
+def subscribe_callback(p_sub, p_result, p_param, errno):
+ # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
+ print("# fetch in callback")
+ result = TaosResult(p_result)
+ result.check_error(errno)
+ for row in result.rows_iter():
+ ts, n = row()
+ print(ts, n)
+
+
+def test_subscribe_callback(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_subscribe_callback"
+ try:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.exec("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.exec("create table if not exists log(ts timestamp, n int)")
+
+ print("# subscribe with callback")
+ sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
+
+ for i in range(10):
+ conn.exec("insert into log values(now, %d)" % i)
+ time.sleep(0.7)
+ sub.close()
+
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_subscribe_callback(connect())
+
+```
+
+### Stream API
+
+```python
+from taos import *
+from ctypes import *
+import time
+
+def stream_callback(p_param, p_result, p_row):
+ # type: (c_void_p, c_void_p, c_void_p) -> None
+
+ if p_result == None or p_row == None:
+ return
+ result = TaosResult(p_result)
+ row = TaosRow(result, p_row)
+ try:
+ ts, count = row()
+ p = cast(p_param, POINTER(Counter))
+ p.contents.count += count
+ print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
+
+ except Exception as err:
+ print(err)
+ raise err
+
+
+class Counter(Structure):
+ _fields_ = [
+ ("count", c_int),
+ ]
+
+ def __str__(self):
+ return "%d" % self.count
+
+
+def test_stream(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_stream"
+ try:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.exec("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.exec("create table if not exists log(ts timestamp, n int)")
+
+ result = conn.query("select count(*) from log interval(5s)")
+ assert result.field_count == 2
+ counter = Counter()
+ counter.count = 0
+ stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
+
+ for _ in range(0, 20):
+ conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
+ time.sleep(2)
+ stream.close()
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_stream(connect())
+```
+
+### Insert with line protocol
+
+```python
+import taos
+
+conn = taos.connect()
+dbname = "pytest_line"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s precision 'us'" % dbname)
+conn.select_db(dbname)
+
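+# each line: table,tag1=val,... field1=val,... timestamp
+# values carry type suffixes (i64, f64, u64); the trailing "ns" marks a nanosecond timestamp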
+lines = [
+ 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
+ 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+]
+conn.insert_lines(lines)
+print("inserted")
+
+lines = [
+ 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+]
+conn.insert_lines(lines)
+
+result = conn.query("show tables")
+for row in result:
+ print(row)
+result.close()
+
+
+conn.exec("drop database if exists %s" % dbname)
+conn.close()
+
+```
+
+## License - AGPL-3.0
Same as [TDengine](https://github.com/taosdata/TDengine).
diff --git a/src/connector/python/examples/bind-multi.py b/src/connector/python/examples/bind-multi.py
new file mode 100644
index 0000000000000000000000000000000000000000..8530253aef58079e01f5eb71d8e12ab1649b7731
--- /dev/null
+++ b/src/connector/python/examples/bind-multi.py
@@ -0,0 +1,50 @@
+# encoding:UTF-8
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt_multi"
+conn.execute("drop database if exists %s" % dbname)
+conn.execute("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[15].timestamp([None, None, 1626861392591])
+stmt.bind_param_batch(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 3
+result.close()
+
+result = conn.query("select * from log")
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
\ No newline at end of file
diff --git a/src/connector/python/examples/bind-row.py b/src/connector/python/examples/bind-row.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ab9a9167ad23a6167c6586aac30ae6941dcee6d
--- /dev/null
+++ b/src/connector/python/examples/bind-row.py
@@ -0,0 +1,57 @@
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt"
+conn.execute("drop database if exists %s" % dbname)
+conn.execute("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_bind_params(16)
+params[0].timestamp(1626861392589)
+params[1].bool(True)
+params[2].null()
+params[3].tinyint(2)
+params[4].smallint(3)
+params[5].int(4)
+params[6].bigint(5)
+params[7].tinyint_unsigned(6)
+params[8].smallint_unsigned(7)
+params[9].int_unsigned(8)
+params[10].bigint_unsigned(9)
+params[11].float(10.1)
+params[12].double(10.11)
+params[13].binary("hello")
+params[14].nchar("stmt")
+params[15].timestamp(1626861392589)
+stmt.bind_param(params)
+
+params[0].timestamp(1626861392590)
+params[15].null()
+stmt.bind_param(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 2
+# No need to close explicitly, but you may do so
+# result.close()
+
+result = conn.query("select * from log")
+
+for row in result:
+ print(row)
+
+# No need to close explicitly, but you may do so
+# result.close()
+# stmt.close()
+# conn.close()
diff --git a/src/connector/python/examples/insert-lines.py b/src/connector/python/examples/insert-lines.py
new file mode 100644
index 0000000000000000000000000000000000000000..0096b7e8cdf1328ee78805a1ee3134ad7cdfc447
--- /dev/null
+++ b/src/connector/python/examples/insert-lines.py
@@ -0,0 +1,22 @@
+import taos
+
+conn = taos.connect()
+dbname = "pytest_line"
+conn.execute("drop database if exists %s" % dbname)
+conn.execute("create database if not exists %s precision 'us'" % dbname)
+conn.select_db(dbname)
+
+lines = [
+ 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns',
+]
+conn.insert_lines(lines)
+print("inserted")
+
+conn.insert_lines(lines)
+
+result = conn.query("show tables")
+for row in result:
+ print(row)
+
+
+conn.execute("drop database if exists %s" % dbname)
diff --git a/src/connector/python/examples/pep-249.py b/src/connector/python/examples/pep-249.py
new file mode 100644
index 0000000000000000000000000000000000000000..971a3c401f00b982096b8d429f65bce73cca4760
--- /dev/null
+++ b/src/connector/python/examples/pep-249.py
@@ -0,0 +1,9 @@
+import taos
+
+conn = taos.connect()
+cursor = conn.cursor()
+
+cursor.execute("show databases")
+results = cursor.fetchall()
+for row in results:
+ print(row)
diff --git a/src/connector/python/examples/query-async.py b/src/connector/python/examples/query-async.py
new file mode 100644
index 0000000000000000000000000000000000000000..b600b796974e47d5e5fc7d88998e95ba46bb92cd
--- /dev/null
+++ b/src/connector/python/examples/query-async.py
@@ -0,0 +1,62 @@
+from taos import *
+from ctypes import *
+import time
+
+def fetch_callback(p_param, p_result, num_of_rows):
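+ # num_of_rows == 0 signals that fetching completed; a negative value is an error code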
+ print("fetched ", num_of_rows, "rows")
+ p = cast(p_param, POINTER(Counter))
+ result = TaosResult(p_result)
+
+ if num_of_rows == 0:
+ print("fetching completed")
+ p.contents.done = True
+ # the result must be closed explicitly once fetching completes, or an error will occur
+ result.close()
+ return
+ if num_of_rows < 0:
+ p.contents.done = True
+ result.check_error(num_of_rows)
+ result.close()
+ return None
+
+ for row in result.rows_iter(num_of_rows):
+ # print(row)
+ pass
+ p.contents.count += result.row_count
+ result.fetch_rows_a(fetch_callback, p_param)
+
+
+
+def query_callback(p_param, p_result, code):
+ # type: (c_void_p, c_void_p, c_int) -> None
+ if p_result == None:
+ return
+ result = TaosResult(p_result)
+ if code == 0:
+ result.fetch_rows_a(fetch_callback, p_param)
+ result.check_error(code)
+ # explicitly close the result when the query fails
+ result.close()
+
+
+class Counter(Structure):
+ _fields_ = [("count", c_int), ("done", c_bool)]
+
+ def __str__(self):
+ return "{ count: %d, done: %s }" % (self.count, self.done)
+
+
+def test_query(conn):
+ # type: (TaosConnection) -> None
+ counter = Counter(count=0)
+ conn.query_a("select * from log.log", query_callback, byref(counter))
+
+ while not counter.done:
+ print("wait query callback")
+ time.sleep(1)
+ print(counter)
+ # conn.close()
+
+
+if __name__ == "__main__":
+ test_query(connect())
\ No newline at end of file
diff --git a/src/connector/python/examples/query-objectively.py b/src/connector/python/examples/query-objectively.py
new file mode 100644
index 0000000000000000000000000000000000000000..104347cbf91e29e62fef26477b475053a8b8bc3e
--- /dev/null
+++ b/src/connector/python/examples/query-objectively.py
@@ -0,0 +1,12 @@
+import taos
+
+conn = taos.connect()
+conn.execute("create database if not exists pytest")
+
+result = conn.query("show databases")
+num_of_fields = result.field_count
+for field in result.fields:
+ print(field)
+for row in result:
+ print(row)
+conn.execute("drop database pytest")
diff --git a/src/connector/python/examples/subscribe-async.py b/src/connector/python/examples/subscribe-async.py
new file mode 100644
index 0000000000000000000000000000000000000000..3782ce5505152e78838406e313094eb911bea4a2
--- /dev/null
+++ b/src/connector/python/examples/subscribe-async.py
@@ -0,0 +1,43 @@
+from taos import *
+from ctypes import *
+
+import time
+
+
+def subscribe_callback(p_sub, p_result, p_param, errno):
+ # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
+ print("# fetch in callback")
+ result = TaosResult(p_result)
+ result.check_error(errno)
+ for row in result.rows_iter():
+ ts, n = row()
+ print(ts, n)
+
+
+def test_subscribe_callback(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_subscribe_callback"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, n int)")
+
+ print("# subscribe with callback")
+ sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
+
+ for i in range(10):
+ conn.execute("insert into log values(now, %d)" % i)
+ time.sleep(0.7)
+ # sub.close()
+
+ conn.execute("drop database if exists %s" % dbname)
+ # conn.close()
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ # conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_subscribe_callback(connect())
diff --git a/src/connector/python/examples/subscribe-sync.py b/src/connector/python/examples/subscribe-sync.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a7f65f460280924ed3a577fe55b975fbf12c1a3
--- /dev/null
+++ b/src/connector/python/examples/subscribe-sync.py
@@ -0,0 +1,53 @@
+import taos
+import random
+
+conn = taos.connect()
+dbname = "pytest_taos_subscribe"
+conn.execute("drop database if exists %s" % dbname)
+conn.execute("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+conn.execute("create table if not exists log(ts timestamp, n int)")
+for i in range(10):
+ conn.execute("insert into log values(now, %d)" % i)
+
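+# subscribe args: restart flag, topic, sql, polling interval (ms); restart=False resumes from saved progress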
+sub = conn.subscribe(False, "test", "select * from log", 1000)
+print("# consume from begin")
+for ts, n in sub.consume():
+ print(ts, n)
+
+print("# consume new data")
+for i in range(5):
+ conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
+ result = sub.consume()
+ for ts, n in result:
+ print(ts, n)
+
+sub.close(True)
+print("# keep progress consume")
+sub = conn.subscribe(False, "test", "select * from log", 1000)
+result = sub.consume()
+rows = result.fetch_all()
+# consuming from the latest subscription progress requires root privilege (for /var/lib/taos).
+assert result.row_count == 0
+print("## consumed ", len(rows), "rows")
+
+print("# consume with a stop condition")
+for i in range(10):
+ conn.execute("insert into log values(now, %d)" % random.randint(0, 10))
+ result = sub.consume()
+ try:
+ ts, n = next(result)
+ print(ts, n)
+ if n > 5:
+ result.stop_query()
+ print("## stopped")
+ break
+ except StopIteration:
+ continue
+
+sub.close()
+
+# sub.close()
+
+conn.execute("drop database if exists %s" % dbname)
+# conn.close()
diff --git a/src/connector/python/pyproject.toml b/src/connector/python/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a8099199563a0e5957a7d69e75bab65cca6d17db
--- /dev/null
+++ b/src/connector/python/pyproject.toml
@@ -0,0 +1,27 @@
+[tool.poetry]
+name = "taos"
+version = "2.1.0"
+description = "TDengine connector for python"
+authors = ["Taosdata Inc. "]
+license = "AGPL-3.0"
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^2.7 || ^3.4"
+typing = "*"
+
+[tool.poetry.dev-dependencies]
+pytest = [
+ { version = "^4.6", python = "^2.7" },
+ { version = "^6.2", python = "^3.7" }
+]
+pdoc = { version = "^7.1.1", python = "^3.7" }
+mypy = { version = "^0.910", python = "^3.6" }
+black = { version = "^21.7b0", python = "^3.6" }
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.black]
+line-length = 119
diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py
index 901e8396c0440bd1c90163f0360b687dd5684ff3..b7e10001737bc40c04173ea4a65e95248965ffda 100644
--- a/src/connector/python/setup.py
+++ b/src/connector/python/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.10",
+ version="2.1.0",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py
index 52c6db311ecc4c2f944372ae3334fdc58cb6e779..75138eade3d60f7894d814babe58cec7aecc9a20 100644
--- a/src/connector/python/taos/__init__.py
+++ b/src/connector/python/taos/__init__.py
@@ -1,20 +1,478 @@
+# encoding:UTF-8
+"""
+# TDengine Connector for Python
-from .connection import TDengineConnection
-from .cursor import TDengineCursor
+[TDengine](https://github.com/taosdata/TDengine) connector for Python enables Python programs to access TDengine
+ through an API compliant with the Python DB API 2.0 (PEP-249). It uses the TDengine C client library for client-server communication.
-# For some reason, the following is needed for VS Code (through PyLance) to
+## Install
+
+```sh
+git clone --depth 1 https://github.com/taosdata/TDengine.git
+pip install ./TDengine/src/connector/python
+```
+
+## Source Code
+
+[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
+
+## Examples
+
+### Query with PEP-249 API
+
+```python
+import taos
+
+conn = taos.connect()
+cursor = conn.cursor()
+
+cursor.execute("show databases")
+results = cursor.fetchall()
+for row in results:
+ print(row)
+cursor.close()
+conn.close()
+```
+
+### Query with objective API
+
+```python
+import taos
+
+conn = taos.connect()
+conn.exec("create database if not exists pytest")
+
+result = conn.query("show databases")
+num_of_fields = result.field_count
+for field in result.fields:
+ print(field)
+for row in result:
+ print(row)
+result.close()
+conn.exec("drop database pytest")
+conn.close()
+```
+
+### Query with async API
+
+```python
+from taos import *
+from ctypes import *
+import time
+
+def fetch_callback(p_param, p_result, num_of_rows):
+ print("fetched ", num_of_rows, "rows")
+ p = cast(p_param, POINTER(Counter))
+ result = TaosResult(p_result)
+
+ if num_of_rows == 0:
+ print("fetching completed")
+ p.contents.done = True
+ result.close()
+ return
+ if num_of_rows < 0:
+ p.contents.done = True
+ result.check_error(num_of_rows)
+ result.close()
+ return None
+
+ for row in result.rows_iter(num_of_rows):
+ # print(row)
+ pass
+ p.contents.count += result.row_count
+ result.fetch_rows_a(fetch_callback, p_param)
+
+
+
+def query_callback(p_param, p_result, code):
+ # type: (c_void_p, c_void_p, c_int) -> None
+ if p_result == None:
+ return
+ result = TaosResult(p_result)
+ if code == 0:
+ result.fetch_rows_a(fetch_callback, p_param)
+ result.check_error(code)
+
+
+class Counter(Structure):
+ _fields_ = [("count", c_int), ("done", c_bool)]
+
+ def __str__(self):
+ return "{ count: %d, done: %s }" % (self.count, self.done)
+
+
+def test_query(conn):
+ # type: (TaosConnection) -> None
+ counter = Counter(count=0)
+ conn.query_a("select * from log.log", query_callback, byref(counter))
+
+ while not counter.done:
+ print("wait query callback")
+ time.sleep(1)
+ print(counter)
+ conn.close()
+
+
+if __name__ == "__main__":
+ test_query(connect())
+```
+
+### Statement API - Bind row after row
+
+```python
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.exec(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_bind_params(16)
+params[0].timestamp(1626861392589)
+params[1].bool(True)
+params[2].null()
+params[3].tinyint(2)
+params[4].smallint(3)
+params[5].int(4)
+params[6].bigint(5)
+params[7].tinyint_unsigned(6)
+params[8].smallint_unsigned(7)
+params[9].int_unsigned(8)
+params[10].bigint_unsigned(9)
+params[11].float(10.1)
+params[12].double(10.11)
+params[13].binary("hello")
+params[14].nchar("stmt")
+params[15].timestamp(1626861392589)
+stmt.bind_param(params)
+
+params[0].timestamp(1626861392590)
+params[15].null()
+stmt.bind_param(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 2
+result.close()
+
+result = conn.query("select * from log")
+
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
+
+```
+
+### Statement API - Bind multiple rows
+
+```python
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.exec(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \\
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \\
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \\
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[15].timestamp([None, None, 1626861392591])
+stmt.bind_param_batch(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 3
+result.close()
+
+result = conn.query("select * from log")
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
+```
+
+### Subscription API - Subscribe
+
+```python
+import taos
+from random import random
+
+conn = taos.connect()
+dbname = "pytest_taos_subscribe_callback"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+conn.exec("create table if not exists log(ts timestamp, n int)")
+for i in range(10):
+ conn.exec("insert into log values(now, %d)" % i)
+
+sub = conn.subscribe(True, "test", "select * from log", 1000)
+print("# consume from begin")
+for ts, n in sub.consume():
+ print(ts, n)
+
+print("# consume new data")
+for i in range(5):
+ conn.exec("insert into log values(now, %d)(now+1s, %d)" % (i, i))
+ result = sub.consume()
+ for ts, n in result:
+ print(ts, n)
+
+print("# consume with a stop condition")
+for i in range(10):
+ conn.exec("insert into log values(now, %d)" % int(random() * 10))
+ result = sub.consume()
+ try:
+ ts, n = next(result)
+ print(ts, n)
+ if n > 5:
+ result.stop_query()
+ print("## stopped")
+ break
+ except StopIteration:
+ continue
+
+sub.close()
+
+conn.exec("drop database if exists %s" % dbname)
+conn.close()
+```
+
+### Subscription API - Subscribe asynchronously with callback
+
+```python
+from taos import *
+from ctypes import *
+
+import time
+
+
+def subscribe_callback(p_sub, p_result, p_param, errno):
+ # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
+ print("# fetch in callback")
+ result = TaosResult(p_result)
+ result.check_error(errno)
+ for row in result.rows_iter():
+ ts, n = row()
+ print(ts, n)
+
+
+def test_subscribe_callback(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_subscribe_callback"
+ try:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.exec("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.exec("create table if not exists log(ts timestamp, n int)")
+
+ print("# subscribe with callback")
+ sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
+
+ for i in range(10):
+ conn.exec("insert into log values(now, %d)" % i)
+ time.sleep(0.7)
+ sub.close()
+
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_subscribe_callback(connect())
+
+```
+
+### Stream API
+
+```python
+from taos import *
+from ctypes import *
+import time
+
+def stream_callback(p_param, p_result, p_row):
+ # type: (c_void_p, c_void_p, c_void_p) -> None
+
+ if p_result == None or p_row == None:
+ return
+ result = TaosResult(p_result)
+ row = TaosRow(result, p_row)
+ try:
+ ts, count = row()
+ p = cast(p_param, POINTER(Counter))
+ p.contents.count += count
+ print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
+
+ except Exception as err:
+ print(err)
+ raise err
+
+
+class Counter(Structure):
+ _fields_ = [
+ ("count", c_int),
+ ]
+
+ def __str__(self):
+ return "%d" % self.count
+
+
+def test_stream(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_stream"
+ try:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.exec("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.exec("create table if not exists log(ts timestamp, n int)")
+
+ result = conn.query("select count(*) from log interval(5s)")
+ assert result.field_count == 2
+ counter = Counter()
+ counter.count = 0
+ stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
+
+ for _ in range(0, 20):
+ conn.exec("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
+ time.sleep(2)
+ stream.close()
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.exec("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_stream(connect())
+```
+
+### Insert with line protocol
+
+```python
+import taos
+
+conn = taos.connect()
+dbname = "pytest_line"
+conn.exec("drop database if exists %s" % dbname)
+conn.exec("create database if not exists %s precision 'us'" % dbname)
+conn.select_db(dbname)
+
+lines = [
+ 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
+ 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+]
+conn.insert_lines(lines)
+print("inserted")
+
+lines = [
+ 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+]
+conn.insert_lines(lines)
+
+result = conn.query("show tables")
+for row in result:
+ print(row)
+result.close()
+
+
+conn.exec("drop database if exists %s" % dbname)
+conn.close()
+
+```
+
+## License - AGPL-3.0
+
+Same as [TDengine](https://github.com/taosdata/TDengine).
+"""
+from .connection import TaosConnection
+
+# For some reason, the following is needed for VS Code (through PyLance) to
# recognize that "error" is a valid module of the "taos" package.
-from .error import ProgrammingError
+from .error import *
+from .bind import *
+from .field import *
+from .cursor import *
+from .result import *
+from .statement import *
+from .subscription import *
+
+try:
+ import importlib.metadata
+
+ __version__ = importlib.metadata.version("taos")
+except:
+ pass
# Globals
threadsafety = 0
-paramstyle = 'pyformat'
-
-__all__ = ['connection', 'cursor']
+paramstyle = "pyformat"
+__all__ = [
+ # functions
+ "connect",
+ "new_bind_param",
+ "new_bind_params",
+ "new_multi_binds",
+ "new_multi_bind",
+ # objects
+ "TaosBind",
+ "TaosConnection",
+ "TaosCursor",
+ "TaosResult",
+ "TaosRows",
+ "TaosRow",
+ "TaosStmt",
+ "PrecisionEnum",
+]
def connect(*args, **kwargs):
- """ Function to return a TDengine connector object
+ # type: (..., ...) -> TaosConnection
+ """Function to return a TDengine connector object
Currently supported keyword parameters:
@dsn: Data source name as string
@@ -25,4 +483,4 @@ def connect(*args, **kwargs):
@rtype: TDengineConnector
"""
- return TDengineConnection(*args, **kwargs)
+ return TaosConnection(*args, **kwargs)
diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py
new file mode 100644
index 0000000000000000000000000000000000000000..083ddc99aea8dc6f39b1f22ac5f77d2584a2fe69
--- /dev/null
+++ b/src/connector/python/taos/bind.py
@@ -0,0 +1,437 @@
+# encoding:UTF-8
+import ctypes
+from .constants import FieldType
+from .error import *
+from .precision import *
+from datetime import datetime
+from ctypes import *
+import sys
+
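+# naive UTC epoch; datetime values are converted to raw timestamps relative to this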
+_datetime_epoch = datetime.utcfromtimestamp(0)
+
+def _is_not_none(obj):
+ return obj is not None
+
+class TaosBind(ctypes.Structure):
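+ # mirrors the C TAOS_BIND structure passed to the taos_stmt bind APIs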
+ _fields_ = [
+ ("buffer_type", c_int),
+ ("buffer", c_void_p),
+ ("buffer_length", c_size_t),
+ ("length", POINTER(c_size_t)),
+ ("is_null", POINTER(c_int)),
+ ("is_unsigned", c_int),
+ ("error", POINTER(c_int)),
+ ("u", c_int64),
+ ("allocated", c_int),
+ ]
+
+ def null(self):
+ self.buffer_type = FieldType.C_NULL
+ self.is_null = pointer(c_int(1))
+
+ def bool(self, value):
+ self.buffer_type = FieldType.C_BOOL
+ self.buffer = cast(pointer(c_bool(value)), c_void_p)
+ self.buffer_length = sizeof(c_bool)
+
+ def tinyint(self, value):
+ self.buffer_type = FieldType.C_TINYINT
+ self.buffer = cast(pointer(c_int8(value)), c_void_p)
+ self.buffer_length = sizeof(c_int8)
+
+ def smallint(self, value):
+ self.buffer_type = FieldType.C_SMALLINT
+ self.buffer = cast(pointer(c_int16(value)), c_void_p)
+ self.buffer_length = sizeof(c_int16)
+
+ def int(self, value):
+ self.buffer_type = FieldType.C_INT
+ self.buffer = cast(pointer(c_int32(value)), c_void_p)
+ self.buffer_length = sizeof(c_int32)
+
+ def bigint(self, value):
+ self.buffer_type = FieldType.C_BIGINT
+ self.buffer = cast(pointer(c_int64(value)), c_void_p)
+ self.buffer_length = sizeof(c_int64)
+
+ def float(self, value):
+ self.buffer_type = FieldType.C_FLOAT
+ self.buffer = cast(pointer(c_float(value)), c_void_p)
+ self.buffer_length = sizeof(c_float)
+
+ def double(self, value):
+ self.buffer_type = FieldType.C_DOUBLE
+ self.buffer = cast(pointer(c_double(value)), c_void_p)
+ self.buffer_length = sizeof(c_double)
+
+ def binary(self, value):
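+ # str values are encoded to UTF-8; bytes-like values are bound as-is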
+ buffer = None
+ length = 0
+ if isinstance(value, str):
+ bytes = value.encode("utf-8")
+ buffer = create_string_buffer(bytes)
+ length = len(bytes)
+ else:
+ buffer = value
+ length = len(value)
+ self.buffer_type = FieldType.C_BINARY
+ self.buffer = cast(buffer, c_void_p)
+ self.buffer_length = length
+ self.length = pointer(c_size_t(self.buffer_length))
+
+ def timestamp(self, value, precision=PrecisionEnum.Milliseconds):
+ if type(value) is datetime:
+ if precision == PrecisionEnum.Milliseconds:
+ ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
+ else:
+ raise PrecisionError("datetime do not support nanosecond precision")
+ elif type(value) is float:
+ if precision == PrecisionEnum.Milliseconds:
+ ts = int(round(value * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ ts = int(round(value * 1000000))
+ else:
+ raise PrecisionError("time float do not support nanosecond precision")
+ elif isinstance(value, int) and not isinstance(value, bool):
+ ts = value
+ elif isinstance(value, str):
+ value = datetime.fromisoformat(value)
+ if precision == PrecisionEnum.Milliseconds:
+ ts = int(round((value - _datetime_epoch).total_seconds() * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ ts = int(round((value - _datetime_epoch).total_seconds() * 1000000))
+ else:
+ raise PrecisionError("datetime does not support nanosecond precision")
+
+ self.buffer_type = FieldType.C_TIMESTAMP
+ self.buffer = cast(pointer(c_int64(ts)), c_void_p)
+ self.buffer_length = sizeof(c_int64)
+
+ def nchar(self, value):
+ buffer = None
+ length = 0
+ if isinstance(value, str):
+ bytes = value.encode("utf-8")
+ buffer = create_string_buffer(bytes)
+ length = len(bytes)
+ else:
+ buffer = value
+ length = len(value)
+ self.buffer_type = FieldType.C_NCHAR
+ self.buffer = cast(buffer, c_void_p)
+ self.buffer_length = length
+ self.length = pointer(c_size_t(self.buffer_length))
+
+ def tinyint_unsigned(self, value):
+ self.buffer_type = FieldType.C_TINYINT_UNSIGNED
+ self.buffer = cast(pointer(c_uint8(value)), c_void_p)
+ self.buffer_length = sizeof(c_uint8)
+
+ def smallint_unsigned(self, value):
+ self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
+ self.buffer = cast(pointer(c_uint16(value)), c_void_p)
+ self.buffer_length = sizeof(c_uint16)
+
+ def int_unsigned(self, value):
+ self.buffer_type = FieldType.C_INT_UNSIGNED
+ self.buffer = cast(pointer(c_uint32(value)), c_void_p)
+ self.buffer_length = sizeof(c_uint32)
+
+ def bigint_unsigned(self, value):
+ self.buffer_type = FieldType.C_BIGINT_UNSIGNED
+ self.buffer = cast(pointer(c_uint64(value)), c_void_p)
+ self.buffer_length = sizeof(c_uint64)
+
+
+def _datetime_to_timestamp(value, precision):
+ # type: (datetime | float | int | str | c_int64, PrecisionEnum) -> c_int64
+ if value is None:
+ return FieldType.C_BIGINT_NULL
+ if type(value) is datetime:
+ if precision == PrecisionEnum.Milliseconds:
+ return int(round((value - _datetime_epoch).total_seconds() * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ return int(round((value - _datetime_epoch).total_seconds() * 1000000))
+ else:
+ raise PrecisionError("datetime does not support nanosecond precision")
+ elif type(value) is float:
+ if precision == PrecisionEnum.Milliseconds:
+ return int(round(value * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ return int(round(value * 1000000))
+ else:
+ raise PrecisionError("time float does not support nanosecond precision")
+ elif isinstance(value, int) and not isinstance(value, bool):
+ return c_int64(value)
+ elif isinstance(value, str):
+ value = datetime.fromisoformat(value)
+ if precision == PrecisionEnum.Milliseconds:
+ return int(round((value - _datetime_epoch).total_seconds() * 1000))
+ elif precision == PrecisionEnum.Microseconds:
+ return int(round((value - _datetime_epoch).total_seconds() * 1000000))
+ else:
+ raise PrecisionError("datetime does not support nanosecond precision")
+ elif isinstance(value, c_int64):
+ return value
+ return FieldType.C_BIGINT_NULL
+
+
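For orientation, the conversion rules above can be exercised directly. A minimal sketch, assuming `_datetime_epoch` is the Unix-epoch constant defined near the top of bind.py (outside this hunk):

```python
from datetime import datetime

# Hypothetical sanity checks, not part of the connector itself.
one_sec = datetime.fromtimestamp(1.0)  # one second past the assumed epoch

assert _datetime_to_timestamp(one_sec, PrecisionEnum.Milliseconds) == 1000
assert _datetime_to_timestamp(one_sec, PrecisionEnum.Microseconds) == 1000000

# Floats are interpreted as seconds; plain ints pass through untouched.
assert _datetime_to_timestamp(1.5, PrecisionEnum.Milliseconds) == 1500
assert _datetime_to_timestamp(42, PrecisionEnum.Milliseconds).value == 42
```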
+class TaosMultiBind(ctypes.Structure):
+ _fields_ = [
+ ("buffer_type", c_int),
+ ("buffer", c_void_p),
+ ("buffer_length", c_size_t),
+ ("length", POINTER(c_int32)),
+ ("is_null", c_char_p),
+ ("num", c_int),
+ ]
+
+ def null(self, num):
+ self.buffer_type = FieldType.C_NULL
+ self.is_null = cast((c_char * num)(*[1 for _ in range(num)]), c_char_p)
+ self.buffer = c_void_p(None)
+ self.num = num
+
+ def bool(self, values):
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int8 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_BOOL_NULL for v in values])
+
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+ self.buffer_type = FieldType.C_BOOL
+ self.buffer_length = sizeof(c_bool)
+
+ def tinyint(self, values):
+ self.buffer_type = FieldType.C_TINYINT
+ self.buffer_length = sizeof(c_int8)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int8 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_NULL for v in values])
+
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def smallint(self, values):
+ self.buffer_type = FieldType.C_SMALLINT
+ self.buffer_length = sizeof(c_int16)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int16 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def int(self, values):
+ self.buffer_type = FieldType.C_INT
+ self.buffer_length = sizeof(c_int32)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int32 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_INT_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def bigint(self, values):
+ self.buffer_type = FieldType.C_BIGINT
+ self.buffer_length = sizeof(c_int64)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int64 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def float(self, values):
+ self.buffer_type = FieldType.C_FLOAT
+ self.buffer_length = sizeof(c_float)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_float * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_FLOAT_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def double(self, values):
+ self.buffer_type = FieldType.C_DOUBLE
+ self.buffer_length = sizeof(c_double)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_double * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_DOUBLE_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def _str_to_buffer(self, values):
+ self.num = len(values)
+ is_null = [1 if v is None else 0 for v in values]
+ self.is_null = cast((c_byte * self.num)(*is_null), c_char_p)
+
+ if sum(is_null) == self.num:
+ self.length = (c_int32 * len(values))()  # ctypes zero-initializes: all lengths are 0
+ return
+ if sys.version_info < (3, 0):
+ _bytes = [bytes(value) if value is not None else None for value in values]
+ buffer_length = max(len(b) + 1 for b in _bytes if b is not None)
+ buffers = [
+ create_string_buffer(b, buffer_length) if b is not None else create_string_buffer(buffer_length)
+ for b in _bytes
+ ]
+ buffer_all = b''.join(v[:] for v in buffers)
+ self.buffer = cast(c_char_p(buffer_all), c_void_p)
+ else:
+ _bytes = [value.encode("utf-8") if value is not None else None for value in values]
+ buffer_length = max(len(b) for b in _bytes if b is not None)
+ self.buffer = cast(
+ c_char_p(
+ b"".join(
+ [
+ create_string_buffer(b, buffer_length)
+ if b is not None
+ else create_string_buffer(buffer_length)
+ for b in _bytes
+ ]
+ )
+ ),
+ c_void_p,
+ )
+ self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes])
+ self.buffer_length = buffer_length
+
+ def binary(self, values):
+ self.buffer_type = FieldType.C_BINARY
+ self._str_to_buffer(values)
+
+ def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int64 * len(values)
+ buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])
+
+ self.buffer_type = FieldType.C_TIMESTAMP
+ self.buffer = cast(buffer, c_void_p)
+ self.buffer_length = sizeof(c_int64)
+ self.num = len(values)
+
+ def nchar(self, values):
+ # type: (list[str]) -> None
+ self.buffer_type = FieldType.C_NCHAR
+ self._str_to_buffer(values)
+
+ def tinyint_unsigned(self, values):
+ self.buffer_type = FieldType.C_TINYINT_UNSIGNED
+ self.buffer_length = sizeof(c_uint8)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_uint8 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_TINYINT_UNSIGNED_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def smallint_unsigned(self, values):
+ self.buffer_type = FieldType.C_SMALLINT_UNSIGNED
+ self.buffer_length = sizeof(c_uint16)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_uint16 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_SMALLINT_UNSIGNED_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def int_unsigned(self, values):
+ self.buffer_type = FieldType.C_INT_UNSIGNED
+ self.buffer_length = sizeof(c_uint32)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_uint32 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_INT_UNSIGNED_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+ def bigint_unsigned(self, values):
+ self.buffer_type = FieldType.C_BIGINT_UNSIGNED
+ self.buffer_length = sizeof(c_uint64)
+
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_uint64 * len(values)
+ try:
+ buffer = buffer_type(*values)
+ except:
+ buffer = buffer_type(*[v if v is not None else FieldType.C_BIGINT_UNSIGNED_NULL for v in values])
+ self.buffer = cast(buffer, c_void_p)
+ self.num = len(values)
+
+
+def new_bind_param():
+ # type: () -> TaosBind
+ return TaosBind()
+
+
+def new_bind_params(size):
+ # type: (int) -> Array[TaosBind]
+ return (TaosBind * size)()
+
+
+def new_multi_bind():
+ # type: () -> TaosMultiBind
+ return TaosMultiBind()
+
+
+def new_multi_binds(size):
+ # type: (int) -> Array[TaosMultiBind]
+ return (TaosMultiBind * size)()
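Taken together, the helpers in this file support a parameter-binding workflow along the following lines. This is a hedged sketch: the table, column layout, and server address are invented, and the `taos_stmt_*` wrappers come from cinterface.py further down in this patch.

```python
from taos.bind import new_bind_params
from taos.cinterface import (
    taos_connect, taos_stmt_init, taos_stmt_prepare,
    taos_stmt_bind_param, taos_stmt_add_batch, taos_stmt_execute,
)

conn = taos_connect(host="localhost", db="demo")  # assumes a reachable server
stmt = taos_stmt_init(conn)
taos_stmt_prepare(stmt, "insert into tb values (?, ?, ?)")

params = new_bind_params(3)            # one TaosBind per placeholder
params[0].timestamp(1626861392589)     # raw epoch values pass through as-is
params[1].int(42)
params[2].binary("hello")              # str values are utf-8 encoded internally

taos_stmt_bind_param(stmt, params)
taos_stmt_add_batch(stmt)
taos_stmt_execute(stmt)
```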
diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py
index 0f690aeb27ce81d867d1727c50ba948afd9493a8..a1b6fe312b5725b8bf030701608d93c3e0c85706 100644
--- a/src/connector/python/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -1,285 +1,836 @@
+# encoding:UTF-8
+
import ctypes
-from .constants import FieldType
-from .error import *
-import math
-import datetime
import platform
+import sys
+from ctypes import *
+try:
+ from typing import Any
+except:
+ pass
+
+from .error import *
+from .bind import *
+from .field import *
+
+
+# stream callback
+stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
+stream_callback2_type = CFUNCTYPE(None, c_void_p)
+
+# C interface class
+class TaosOption:
+ Locale = (0,)
+ Charset = (1,)
+ Timezone = (2,)
+ ConfigDir = (3,)
+ ShellActivityTimer = (4,)
+ MaxOptions = (5,)
+
+
+def _load_taos_linux():
+ return ctypes.CDLL("libtaos.so")
+
+
+def _load_taos_darwin():
+ return ctypes.CDLL("libtaos.dylib")
+
+
+def _load_taos_windows():
+ return ctypes.windll.LoadLibrary("taos")
-def _convert_millisecond_to_datetime(milli):
- return datetime.datetime.fromtimestamp(milli / 1000.0)
+def _load_taos():
+ load_func = {
+ "Linux": _load_taos_linux,
+ "Darwin": _load_taos_darwin,
+ "Windows": _load_taos_windows,
+ }
+ try:
+ return load_func[platform.system()]()
+ except:
+ raise InterfaceError('unsupported platform or failed to load taos client library')
-def _convert_microsecond_to_datetime(micro):
- return datetime.datetime.fromtimestamp(micro / 1000000.0)
+_libtaos = _load_taos()
+_libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
+_libtaos.taos_init.restype = None
+_libtaos.taos_connect.restype = ctypes.c_void_p
+_libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
+_libtaos.taos_errstr.restype = ctypes.c_char_p
+_libtaos.taos_subscribe.restype = ctypes.c_void_p
+_libtaos.taos_consume.restype = ctypes.c_void_p
+_libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int)
+_libtaos.taos_free_result.restype = None
+_libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
+try:
+ _libtaos.taos_stmt_errstr.restype = c_char_p
+except AttributeError:
+ pass
-def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
+
+_libtaos.taos_options.restype = None
+
+
+def taos_options(option, *args):
+ # type: (TaosOption, Any) -> None
+ _libtaos.taos_options(option, *args)
+
+
+def taos_init():
+ # type: () -> None
+ """
+ C: taos_init
"""
- _timestamp_converter = _convert_millisecond_to_datetime
- if micro:
- _timestamp_converter = _convert_microsecond_to_datetime
+ _libtaos.taos_init()
+
- return [
- None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_int64))[
- :abs(num_of_rows)]]
+_libtaos.taos_cleanup.restype = None
-def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bool row to python row
+def taos_cleanup():
+ # type: () -> None
+ """Cleanup workspace."""
+ _libtaos.taos_cleanup()
+
+
+_libtaos.taos_get_client_info.restype = c_char_p
+
+
+def taos_get_client_info():
+ # type: () -> str
+ """Get client version info."""
+ return _libtaos.taos_get_client_info().decode()
+
+
+_libtaos.taos_get_server_info.restype = c_char_p
+_libtaos.taos_get_server_info.argtypes = (c_void_p,)
+
+
+def taos_get_server_info(connection):
+ # type: (c_void_p) -> str
+ """Get server version as string."""
+ return _libtaos.taos_get_server_info(connection).decode()
+
+
+_libtaos.taos_close.restype = None
+_libtaos.taos_close.argtypes = (c_void_p,)
+
+
+def taos_close(connection):
+ # type: (c_void_p) -> None
+ """Close the TAOS* connection"""
+ _libtaos.taos_close(connection)
+
+
+_libtaos.taos_connect.restype = c_void_p
+_libtaos.taos_connect.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16
+
+
+def taos_connect(host=None, user="root", password="taosdata", db=None, port=0):
+ # type: (None|str, str, str, None|str, int) -> c_void_p
+ """Create TDengine database connection.
+
+ - host: server hostname/FQDN
+ - user: user name
+ - password: user password
+ - db: database name (optional)
+ - port: server port
+
+ @rtype: c_void_p, TDengine handle
"""
- return [
- None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_byte))[
- :abs(num_of_rows)]]
+ # host
+ try:
+ _host = c_char_p(host.encode("utf-8")) if host is not None else None
+ except AttributeError:
+ raise AttributeError("host is expected as a str")
+
+ # user
+ try:
+ _user = c_char_p(user.encode("utf-8"))
+ except AttributeError:
+ raise AttributeError("user is expected as a str")
+
+ # password
+ try:
+ _password = c_char_p(password.encode("utf-8"))
+ except AttributeError:
+ raise AttributeError("password is expected as a str")
+
+ # db
+ try:
+ _db = c_char_p(db.encode("utf-8")) if db is not None else None
+ except AttributeError:
+ raise AttributeError("db is expected as a str")
+
+ # port
+ try:
+ _port = c_uint16(port)
+ except TypeError:
+ raise TypeError("port is expected as an uint16")
+
+ connection = cast(_libtaos.taos_connect(_host, _user, _password, _db, _port), c_void_p)
+
+ if connection.value is None:
+ raise ConnectionError("connect to TDengine failed")
+ return connection
-def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C tinyint row to python row
+_libtaos.taos_connect_auth.restype = c_void_p
+_libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16
+
+
+def taos_connect_auth(host=None, user="root", auth="", db=None, port=0):
+ # type: (None|str, str, str, None|str, int) -> c_void_p
+ """Connect server with auth token.
+
+ - host: server hostname/FQDN
+ - user: user name
+ - auth: base64 encoded auth token
+ - db: database name (optional)
+ - port: server port
+
+ @rtype: c_void_p, TDengine handle
"""
- return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]]
+ # host
+ try:
+ _host = c_char_p(host.encode("utf-8")) if host is not None else None
+ except AttributeError:
+ raise AttributeError("host is expected as a str")
+
+ # user
+ try:
+ _user = c_char_p(user.encode("utf-8"))
+ except AttributeError:
+ raise AttributeError("user is expected as a str")
+
+ # auth
+ try:
+ _auth = c_char_p(auth.encode("utf-8"))
+ except AttributeError:
+ raise AttributeError("password is expected as a str")
+
+ # db
+ try:
+ _db = c_char_p(db.encode("utf-8")) if db is not None else None
+ except AttributeError:
+ raise AttributeError("db is expected as a str")
+
+ # port
+ try:
+ _port = c_int(port)
+ except TypeError:
+ raise TypeError("port is expected as an int")
+
+ connection = c_void_p(_libtaos.taos_connect_auth(_host, _user, _auth, _db, _port))
+
+ if connection.value is None:
+ raise ConnectionError("connect to TDengine failed")
+ return connection
-def _crow_tinyint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C tinyint row to python row
+_libtaos.taos_query.restype = c_void_p
+_libtaos.taos_query.argtypes = c_void_p, c_char_p
+
+
+def taos_query(connection, sql):
+ # type: (c_void_p, str) -> c_void_p
+ """Run SQL
+
+ - sql: str, sql string to run
+
+ @return: TAOS_RES*, result pointer
+
"""
- return [
- None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ubyte))[
- :abs(num_of_rows)]]
+ try:
+ ptr = c_char_p(sql.encode("utf-8"))
+ res = c_void_p(_libtaos.taos_query(connection, ptr))
+ errno = taos_errno(res)
+ if errno != 0:
+ errstr = taos_errstr(res)
+ taos_free_result(res)
+ raise ProgrammingError(errstr, errno)
+ return res
+ except AttributeError:
+ raise AttributeError("sql is expected as a string")
+
+
+async_query_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_int)
+_libtaos.taos_query_a.restype = None
+_libtaos.taos_query_a.argtypes = c_void_p, c_char_p, async_query_callback_type, c_void_p
+
+
+def taos_query_a(connection, sql, callback, param):
+ # type: (c_void_p, str, async_query_callback_type, c_void_p) -> c_void_p
+ _libtaos.taos_query_a(connection, c_char_p(sql.encode("utf-8")), async_query_callback_type(callback), param)
+
+
+async_fetch_rows_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_int)
+_libtaos.taos_fetch_rows_a.restype = None
+_libtaos.taos_fetch_rows_a.argtypes = c_void_p, async_fetch_rows_callback_type, c_void_p
+
+
+def taos_fetch_rows_a(result, callback, param):
+ # type: (c_void_p, async_fetch_rows_callback_type, c_void_p) -> c_void_p
+ _libtaos.taos_fetch_rows_a(result, async_fetch_rows_callback_type(callback), param)
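Both async entry points hand a Python callable to the C client as a ctypes callback. A hedged sketch of the intended call pattern (note the CFUNCTYPE wrapper is created inside `taos_query_a` itself, so long-lived code may want to hold that wrapper object to keep it from being garbage-collected):

```python
import time
from taos.cinterface import taos_connect, taos_query_a

def on_query_done(param, result, code):
    # signature follows async_query_callback_type: (void*, TAOS_RES*, int)
    print("async query finished, errno =", code)

conn = taos_connect(host="localhost")  # assumes a reachable server
taos_query_a(conn, "show databases", on_query_done, None)
time.sleep(1)  # crude wait so the callback can fire before the script exits
```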
-def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
+def taos_affected_rows(result):
+ # type: (c_void_p) -> c_int
+ """The affected rows after runing query"""
+ return _libtaos.taos_affected_rows(result)
+
+
+subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int)
+_libtaos.taos_subscribe.restype = c_void_p
+# _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int
+
+
+def taos_subscribe(connection, restart, topic, sql, interval, callback=None, param=None):
+ # type: (c_void_p, bool, str, str, c_int, subscribe_callback_type, c_void_p | None) -> c_void_p
+ """Create a subscription
+ @restart boolean,
+ @sql string, sql statement for data query, must be a 'select' statement.
+ @topic string, name of this subscription
"""
- return [
- None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_short))[
- :abs(num_of_rows)]]
+ if callback is not None:
+ callback = subscribe_callback_type(callback)
+ if param is not None:
+ param = c_void_p(param)
+ return c_void_p(
+ _libtaos.taos_subscribe(
+ connection,
+ 1 if restart else 0,
+ c_char_p(topic.encode("utf-8")),
+ c_char_p(sql.encode("utf-8")),
+ callback or None,
+ param,
+ interval,
+ )
+ )
+
+
+_libtaos.taos_consume.restype = c_void_p
+_libtaos.taos_consume.argtypes = (c_void_p,)
+
+
+def taos_consume(sub):
+ """Consume data of a subscription"""
+ return c_void_p(_libtaos.taos_consume(sub))
+
+
+_libtaos.taos_unsubscribe.restype = None
+_libtaos.taos_unsubscribe.argtypes = (c_void_p, c_int)
+
+
+def taos_unsubscribe(sub, keep_progress):
+ """Cancel a subscription"""
+ _libtaos.taos_unsubscribe(sub, 1 if keep_progress else 0)
+
+
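A polling consumer over these wrappers might look roughly like this. Sketch only: the topic, sql, and drain loop are illustrative, and per the TDengine C API the result handle from `taos_consume` is owned by the subscription, so it is not freed here.

```python
import time
from taos.cinterface import (
    taos_connect, taos_subscribe, taos_consume,
    taos_unsubscribe, taos_fetch_row_raw,
)

conn = taos_connect(host="localhost", db="log")  # assumed database
sub = taos_subscribe(conn, True, "topic-demo", "select * from meters", 1000)
try:
    for _ in range(10):
        result = taos_consume(sub)
        # drain whatever rows arrived since the previous consume
        while taos_fetch_row_raw(result):
            pass
        time.sleep(1)
finally:
    taos_unsubscribe(sub, keep_progress=False)
```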
+def taos_use_result(result):
+ """Use result after calling self.query, it's just for 1.6."""
+ fields = []
+ pfields = taos_fetch_fields_raw(result)
+ for i in range(taos_field_count(result)):
+ fields.append(
+ {
+ "name": pfields[i].name,
+ "bytes": pfields[i].bytes,
+ "type": pfields[i].type,
+ }
+ )
+
+ return fields
+
+
+_libtaos.taos_fetch_block.restype = c_int
+_libtaos.taos_fetch_block.argtypes = c_void_p, c_void_p
+
+
+def taos_fetch_block_raw(result):
+ pblock = ctypes.c_void_p(0)
+ num_of_rows = _libtaos.taos_fetch_block(result, ctypes.byref(pblock))
+ if num_of_rows == 0:
+ return None, 0
+ return pblock, abs(num_of_rows)
+
+
+def taos_fetch_block(result, fields=None, field_count=None):
+ pblock = ctypes.c_void_p(0)
+ num_of_rows = _libtaos.taos_fetch_block(result, ctypes.byref(pblock))
+ if num_of_rows == 0:
+ return None, 0
+ precision = taos_result_precision(result)
+ if fields is None:
+ fields = taos_fetch_fields(result)
+ if field_count is None:
+ field_count = taos_field_count(result)
+ blocks = [None] * field_count
+ fieldLen = taos_fetch_lengths(result, field_count)
+ for i in range(len(fields)):
+ data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
+ if fields[i]["type"] not in CONVERT_FUNC:
+ raise DatabaseError("Invalid data type returned from database")
+ blocks[i] = CONVERT_FUNC_BLOCK[fields[i]["type"]](data, num_of_rows, fieldLen[i], precision)
+
+ return blocks, abs(num_of_rows)
+
+
+_libtaos.taos_fetch_row.restype = c_void_p
+_libtaos.taos_fetch_row.argtypes = (c_void_p,)
+
+
+def taos_fetch_row_raw(result):
+ # type: (c_void_p) -> c_void_p
+ row = c_void_p(_libtaos.taos_fetch_row(result))
+ if row:
+ return row
+ return None
+
+
+def taos_fetch_row(result, fields):
+ # type: (c_void_p, Array[TaosField]) -> tuple[list, int]
+ pblock = taos_fetch_row_raw(result)
+ if pblock:
+ num_of_rows = 1
+ precision = taos_result_precision(result)
+ field_count = taos_field_count(result)
+ blocks = [None] * field_count
+ field_lens = taos_fetch_lengths(result, field_count)
+ for i in range(field_count):
+ data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
+ if fields[i].type not in CONVERT_FUNC:
+ raise DatabaseError("Invalid data type returned from database")
+ if data is None:
+ blocks[i] = [None]
+ else:
+ blocks[i] = CONVERT_FUNC[fields[i].type](data, num_of_rows, field_lens[i], precision)
+ else:
+ return None, 0
+ return blocks, abs(num_of_rows)
+
+
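To show how the fetch helpers compose, here is a hedged end-to-end read loop (assumes a reachable server; `taos_fetch_row` returns one row per call as a list of single-element column lists):

```python
from taos.cinterface import (
    taos_connect, taos_query, taos_fetch_fields,
    taos_fetch_row, taos_free_result, taos_close,
)

conn = taos_connect(host="localhost")
result = taos_query(conn, "show databases")
fields = taos_fetch_fields(result)

rows = []
while True:
    block, num = taos_fetch_row(result, fields)
    if num == 0:
        break
    # transpose the single-row column block into an ordinary row tuple
    rows.append(tuple(col[0] for col in block))

taos_free_result(result)
taos_close(conn)
print(rows)
```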
+_libtaos.taos_free_result.argtypes = (c_void_p,)
+
+
+def taos_free_result(result):
+ # type: (c_void_p) -> None
+ if result is not None:
+ _libtaos.taos_free_result(result)
+
+
+_libtaos.taos_field_count.restype = c_int
+_libtaos.taos_field_count.argtypes = (c_void_p,)
+
+
+def taos_field_count(result):
+ # type: (c_void_p) -> int
+ return _libtaos.taos_field_count(result)
+
+
+def taos_num_fields(result):
+ # type: (c_void_p) -> int
+ return _libtaos.taos_num_fields(result)
+
+
+_libtaos.taos_fetch_fields.restype = c_void_p
+_libtaos.taos_fetch_fields.argtypes = (c_void_p,)
+
+
+def taos_fetch_fields_raw(result):
+ # type: (c_void_p) -> c_void_p
+ return c_void_p(_libtaos.taos_fetch_fields(result))
+
+
+def taos_fetch_fields(result):
+ # type: (c_void_p) -> TaosFields
+ fields = taos_fetch_fields_raw(result)
+ count = taos_field_count(result)
+ return TaosFields(fields, count)
+
+
+def taos_fetch_lengths(result, field_count=None):
+ # type: (c_void_p, int) -> Array[int]
+ """Make sure to call taos_fetch_row or taos_fetch_block before fetch_lengths"""
+ lens = _libtaos.taos_fetch_lengths(result)
+ if field_count is None:
+ field_count = taos_field_count(result)
+ if not lens:
+ raise OperationalError("field length empty, use taos_fetch_row/block before it")
+ return lens[:field_count]
+
+
+def taos_result_precision(result):
+ # type: (c_void_p) -> c_int
+ return _libtaos.taos_result_precision(result)
+
+
+_libtaos.taos_errno.restype = c_int
+_libtaos.taos_errno.argtypes = (c_void_p,)
+
+
+def taos_errno(result):
+ # type: (ctypes.c_void_p) -> c_int
+ """Return the error number."""
+ return _libtaos.taos_errno(result)
+
+
+_libtaos.taos_errstr.restype = c_char_p
+_libtaos.taos_errstr.argtypes = (c_void_p,)
+
+
+def taos_errstr(result=c_void_p(None)):
+ # type: (ctypes.c_void_p) -> str
+ """Return the error styring"""
+ return _libtaos.taos_errstr(result).decode("utf-8")
+
+
+_libtaos.taos_stop_query.restype = None
+_libtaos.taos_stop_query.argtypes = (c_void_p,)
+
+
+def taos_stop_query(result):
+ # type: (ctypes.c_void_p) -> None
+ """Stop current query"""
+ return _libtaos.taos_stop_query(result)
+
+
+_libtaos.taos_load_table_info.restype = c_int
+_libtaos.taos_load_table_info.argtypes = (c_void_p, c_char_p)
+
+
+def taos_load_table_info(connection, tables):
+ # type: (ctypes.c_void_p, str) -> None
+ """Stop current query"""
+ errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8")))
+ if errno != 0:
+ msg = taos_errstr()
+ raise OperationalError(msg, errno)
+
+
+_libtaos.taos_validate_sql.restype = c_int
+_libtaos.taos_validate_sql.argtypes = (c_void_p, c_char_p)
+
+
+def taos_validate_sql(connection, sql):
+ # type: (ctypes.c_void_p, str) -> None | str
+ """Get taosd server info"""
+ errno = _libtaos.taos_validate_sql(connection, ctypes.c_char_p(sql.encode("utf-8")))
+ if errno != 0:
+ msg = taos_errstr()
+ return msg
+ return None
+
+
+_libtaos.taos_print_row.restype = c_int
+_libtaos.taos_print_row.argtypes = (c_char_p, c_void_p, c_void_p, c_int)
+
+
+def taos_print_row(row, fields, num_fields, buffer_size=4096):
+ # type: (ctypes.c_void_p, ctypes.c_void_p | TaosFields, int, int) -> str
+ """Print an row to string"""
+ p = ctypes.create_string_buffer(buffer_size)
+ if isinstance(fields, TaosFields):
+ _libtaos.taos_print_row(p, row, fields.as_ptr(), num_fields)
+ else:
+ _libtaos.taos_print_row(p, row, fields, num_fields)
+ if p:
+ return p.value.decode("utf-8")
+ raise OperationalError("taos_print_row failed")
+
+
+_libtaos.taos_select_db.restype = c_int
+_libtaos.taos_select_db.argtypes = (c_void_p, c_char_p)
+
+
+def taos_select_db(connection, db):
+ # type: (ctypes.c_void_p, str) -> None
+ """Select database, eq to sql: use """
+ res = _libtaos.taos_select_db(connection, ctypes.c_char_p(db.encode("utf-8")))
+ if res != 0:
+ raise DatabaseError("select database error", res)
+
+
+try:
+ _libtaos.taos_open_stream.restype = c_void_p
+ _libtaos.taos_open_stream.argtypes = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, c_void_p
+except:
+ pass
+
+
+def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None):
+ # type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer
+ """Open a stream"""
+ if callback2 is not None:
+ callback2 = stream_callback2_type(callback2)
+ return c_void_p(
+ _libtaos.taos_open_stream(
+ connection, ctypes.c_char_p(sql.encode("utf-8")), stream_callback_type(callback), stime, param, callback2
+ )
+ )
+
+
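A continuous-query stream built on these two wrappers could be wired up as follows. Sketch only: the SQL and callback body are illustrative, and as with the async APIs the callback should stay referenced while the stream is open.

```python
import time
from taos.cinterface import taos_connect, taos_open_stream, taos_close_stream

def on_stream_row(param, result, row):
    # invoked by the client library for each new window result
    print("stream produced a row")

conn = taos_connect(host="localhost", db="log")  # assumed database
stream = taos_open_stream(conn, "select count(*) from meters interval(5s)", on_stream_row)
time.sleep(30)  # let a few windows fire
taos_close_stream(stream)
```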
+_libtaos.taos_close_stream.restype = None
+_libtaos.taos_close_stream.argtypes = (c_void_p,)
+
+
+def taos_close_stream(stream):
+ # type: (c_void_p) -> None
+ """Open an stream"""
+ return _libtaos.taos_close_stream(stream)
-def _crow_smallint_unsigned_to_python(
- data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C smallint row to python row
+
+_libtaos.taos_stmt_init.restype = c_void_p
+_libtaos.taos_stmt_init.argtypes = (c_void_p,)
+
+
+def taos_stmt_init(connection):
+ # type: (c_void_p) -> c_void_p
+ """Create a statement query
+ @param(connection): c_void_p TAOS*
+ @rtype: c_void_p, *TAOS_STMT
"""
- return [
- None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_ushort))[
- :abs(num_of_rows)]]
+ return c_void_p(_libtaos.taos_stmt_init(connection))
+
+_libtaos.taos_stmt_prepare.restype = c_int
+_libtaos.taos_stmt_prepare.argtypes = (c_void_p, c_char_p, c_int)
-def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
+
+def taos_stmt_prepare(stmt, sql):
+ # type: (ctypes.c_void_p, str) -> None
+ """Prepare a statement query
+ @stmt: c_void_p TAOS_STMT*
"""
- return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]]
+ buffer = sql.encode("utf-8")
+ res = _libtaos.taos_stmt_prepare(stmt, ctypes.c_char_p(buffer), len(buffer))
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
+_libtaos.taos_stmt_close.restype = c_int
+_libtaos.taos_stmt_close.argtypes = (c_void_p,)
-def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C int row to python row
+
+def taos_stmt_close(stmt):
+ # type: (ctypes.c_void_p) -> None
+ """Close a statement query
+ @stmt: c_void_p TAOS_STMT*
"""
- return [
- None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint))[
- :abs(num_of_rows)]]
+ res = _libtaos.taos_stmt_close(stmt)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
+try:
+ _libtaos.taos_stmt_errstr.restype = c_char_p
+ _libtaos.taos_stmt_errstr.argtypes = (c_void_p,)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())
-def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C bigint row to python row
+
+def taos_stmt_errstr(stmt):
+ # type: (ctypes.c_void_p) -> str
+ """Get error message from stetement query
+ @stmt: c_void_p TAOS_STMT*
"""
- return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
+ err = c_char_p(_libtaos.taos_stmt_errstr(stmt))
+ if err:
+ return err.value.decode("utf-8")
+
+try:
+ _libtaos.taos_stmt_set_tbname.restype = c_int
+ _libtaos.taos_stmt_set_tbname.argtypes = (c_void_p, c_char_p)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname" % taos_get_client_info())
-def _crow_bigint_unsigned_to_python(
- data,
- num_of_rows,
- nbytes=None,
- micro=False):
- """Function to convert C bigint row to python row
+
+def taos_stmt_set_tbname(stmt, name):
+ # type: (ctypes.c_void_p, str) -> None
+ """Set table name of a statement query if exists.
+ @stmt: c_void_p TAOS_STMT*
"""
- return [
- None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
- data, ctypes.POINTER(
- ctypes.c_uint64))[
- :abs(num_of_rows)]]
+ res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8")))
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
+try:
+ _libtaos.taos_stmt_set_tbname_tags.restype = c_int
+ _libtaos.taos_stmt_set_tbname_tags.argtypes = (c_void_p, c_char_p, c_void_p)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname_tags" % taos_get_client_info())
-def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C float row to python row
+
+def taos_stmt_set_tbname_tags(stmt, name, tags):
+ # type: (c_void_p, str, c_void_p) -> None
+ """Set table name with tags bind params.
+ @stmt: c_void_p TAOS_STMT*
"""
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]]
+ res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags)
+
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
+_libtaos.taos_stmt_is_insert.restype = c_int
+_libtaos.taos_stmt_is_insert.argtypes = (c_void_p, POINTER(c_int))
-def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C double row to python row
+
+def taos_stmt_is_insert(stmt):
+ # type: (ctypes.c_void_p) -> bool
+ """Set table name with tags bind params.
+ @stmt: c_void_p TAOS_STMT*
"""
- return [None if math.isnan(ele) else ele for ele in ctypes.cast(
- data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]]
+ is_insert = ctypes.c_int()
+ res = _libtaos.taos_stmt_is_insert(stmt, ctypes.byref(is_insert))
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+ return is_insert.value != 0
+
+_libtaos.taos_stmt_num_params.restype = c_int
+_libtaos.taos_stmt_num_params.argtypes = (c_void_p, POINTER(c_int))
-def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
+
+def taos_stmt_num_params(stmt):
+ # type: (ctypes.c_void_p) -> int
+ """Params number of the current statement query.
+ @stmt: TAOS_STMT*
"""
- assert(nbytes is not None)
- return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode(
- 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
+ num_params = ctypes.c_int()
+ res = _libtaos.taos_stmt_num_params(stmt, ctypes.byref(num_params))
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+ return num_params.value
+
+_libtaos.taos_stmt_bind_param.restype = c_int
+_libtaos.taos_stmt_bind_param.argtypes = (c_void_p, c_void_p)
-def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
+
+def taos_stmt_bind_param(stmt, bind):
+ # type: (ctypes.c_void_p, Array[TaosBind]) -> None
+ """Bind params in the statement query.
+ @stmt: TAOS_STMT*
+ @bind: TAOS_BIND*
"""
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- if num_of_rows >= 0:
- tmpstr = ctypes.c_char_p(data)
- res.append(tmpstr.value.decode())
- else:
- res.append((ctypes.cast(data + nbytes * i,
- ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value)
- except ValueError:
- res.append(None)
+ # ptr = ctypes.cast(bind, POINTER(TaosBind))
+ # ptr = pointer(bind)
+ res = _libtaos.taos_stmt_bind_param(stmt, bind)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
+try:
+ _libtaos.taos_stmt_bind_param_batch.restype = c_int
+ _libtaos.taos_stmt_bind_param_batch.argtypes = (c_void_p, c_void_p)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support taos_stmt_bind_param_batch" % taos_get_client_info())
- return res
-def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C binary row to python row
+def taos_stmt_bind_param_batch(stmt, bind):
+ # type: (ctypes.c_void_p, Array[TaosMultiBind]) -> None
+ """Bind params in the statement query.
+ @stmt: TAOS_STMT*
+ @bind: TAOS_MULTI_BIND*
"""
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- rbyte = ctypes.cast(
- data + nbytes * i,
- ctypes.POINTER(
- ctypes.c_short))[
- :1].pop()
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode()[0:rbyte])
- except ValueError:
- res.append(None)
- return res
-
-
-def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False):
- """Function to convert C nchar row to python row
+ # ptr = ctypes.cast(bind, POINTER(TaosMultiBind))
+ # ptr = pointer(bind)
+ res = _libtaos.taos_stmt_bind_param_batch(stmt, bind)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
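Column-wise batch binding then pairs `TaosMultiBind` arrays with this call. A hedged sketch (assumes `stmt` was prepared for a two-column insert as in the earlier bind sketch; `None` entries are mapped to the per-type null sentinels by bind.py):

```python
from taos.bind import new_multi_binds

params = new_multi_binds(2)          # one TaosMultiBind per column
params[0].timestamp([1626861392589, 1626861392590])
params[1].int([10, None])            # None becomes FieldType.C_INT_NULL

taos_stmt_bind_param_batch(stmt, params)
taos_stmt_add_batch(stmt)
taos_stmt_execute(stmt)
```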
+try:
+ _libtaos.taos_stmt_bind_single_param_batch.restype = c_int
+ _libtaos.taos_stmt_bind_single_param_batch.argtypes = (c_void_p, c_void_p, c_int)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support taos_stmt_bind_single_param_batch" % taos_get_client_info())
+
+
+def taos_stmt_bind_single_param_batch(stmt, bind, col):
+ # type: (ctypes.c_void_p, Array[TaosMultiBind], c_int) -> None
+ """Bind params in the statement query.
+ @stmt: TAOS_STMT*
+ @bind: TAOS_MULTI_BIND*
+ @col: column index
"""
- assert(nbytes is not None)
- res = []
- for i in range(abs(num_of_rows)):
- try:
- tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
- res.append(tmpstr.value.decode())
- except ValueError:
- res.append(None)
- return res
-
-
-_CONVERT_FUNC = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-_CONVERT_FUNC_BLOCK = {
- FieldType.C_BOOL: _crow_bool_to_python,
- FieldType.C_TINYINT: _crow_tinyint_to_python,
- FieldType.C_SMALLINT: _crow_smallint_to_python,
- FieldType.C_INT: _crow_int_to_python,
- FieldType.C_BIGINT: _crow_bigint_to_python,
- FieldType.C_FLOAT: _crow_float_to_python,
- FieldType.C_DOUBLE: _crow_double_to_python,
- FieldType.C_BINARY: _crow_binary_to_python_block,
- FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
- FieldType.C_NCHAR: _crow_nchar_to_python_block,
- FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
- FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
- FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
- FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python
-}
-
-# Corresponding TAOS_FIELD structure in C
-
-
-class TaosField(ctypes.Structure):
- _fields_ = [('name', ctypes.c_char * 65),
- ('type', ctypes.c_char),
- ('bytes', ctypes.c_short)]
+ res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
-# C interface class
+_libtaos.taos_stmt_add_batch.restype = c_int
+_libtaos.taos_stmt_add_batch.argtypes = (c_void_p,)
-def _load_taos_linux():
- return ctypes.CDLL('libtaos.so')
+def taos_stmt_add_batch(stmt):
+ # type: (ctypes.c_void_p) -> None
+ """Add current params into batch
+ @stmt: TAOS_STMT*
+ """
+ res = _libtaos.taos_stmt_add_batch(stmt)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
-def _load_taos_darwin():
- return ctypes.CDLL('libtaos.dylib')
+_libtaos.taos_stmt_execute.restype = c_int
+_libtaos.taos_stmt_execute.argtypes = (c_void_p,)
-def _load_taos_windows():
- return ctypes.windll.LoadLibrary('taos')
+def taos_stmt_execute(stmt):
+ # type: (ctypes.c_void_p) -> None
+ """Execute a statement query
+ @stmt: TAOS_STMT*
+ """
+ res = _libtaos.taos_stmt_execute(stmt)
+ if res != 0:
+ raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
-def _load_taos():
- load_func = {
- 'Linux': _load_taos_linux,
- 'Darwin': _load_taos_darwin,
- 'Windows': _load_taos_windows,
- }
- try:
- return load_func[platform.system()]()
- except:
- sys.exit('unsupported platform to TDengine connector')
+_libtaos.taos_stmt_use_result.restype = c_void_p
+_libtaos.taos_stmt_use_result.argtypes = (c_void_p,)
-class CTaosInterface(object):
- libtaos = _load_taos()
-
- libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
- libtaos.taos_init.restype = None
- libtaos.taos_connect.restype = ctypes.c_void_p
- # libtaos.taos_use_result.restype = ctypes.c_void_p
- libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
- libtaos.taos_errstr.restype = ctypes.c_char_p
- libtaos.taos_subscribe.restype = ctypes.c_void_p
- libtaos.taos_consume.restype = ctypes.c_void_p
- libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
- libtaos.taos_free_result.restype = None
- libtaos.taos_errno.restype = ctypes.c_int
- libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
+def taos_stmt_use_result(stmt):
+ # type: (ctypes.c_void_p) -> None
+ """Get result of the statement.
+ @stmt: TAOS_STMT*
+ """
+ result = c_void_p(_libtaos.taos_stmt_use_result(stmt))
+ if result.value is None:
+ raise StatementError(taos_stmt_errstr(stmt))
+ return result
+
+try:
+ _libtaos.taos_insert_lines.restype = c_int
+ _libtaos.taos_insert_lines.argtypes = (c_void_p, c_void_p, c_int)
+except AttributeError:
+ print("WARNING: libtaos(%s) does not support insert_lines" % taos_get_client_info())
+
+
+def taos_insert_lines(connection, lines):
+ # type: (c_void_p, list[str] | tuple[str]) -> None
+ num_of_lines = len(lines)
+ lines = (c_char_p(line.encode("utf-8")) for line in lines)
+ lines_type = ctypes.c_char_p * num_of_lines
+ p_lines = lines_type(*lines)
+ errno = _libtaos.taos_insert_lines(connection, p_lines, num_of_lines)
+ if errno != 0:
+ raise LinesError("insert lines error", errno)
+
+
+class CTaosInterface(object):
def __init__(self, config=None):
- '''
+ """
Function to initialize the class
@host : str, hostname to connect
@user : str, username to connect to server
@@ -288,292 +839,46 @@ class CTaosInterface(object):
@config : str, config directory
@rtype : None
- '''
+ """
if config is None:
self._config = ctypes.c_char_p(None)
else:
try:
- self._config = ctypes.c_char_p(config.encode('utf-8'))
+ self._config = ctypes.c_char_p(config.encode("utf-8"))
except AttributeError:
raise AttributeError("config is expected as a str")
if config is not None:
- CTaosInterface.libtaos.taos_options(3, self._config)
+ taos_options(3, self._config)
- CTaosInterface.libtaos.taos_init()
+ taos_init()
@property
def config(self):
- """ Get current config
- """
+ """Get current config"""
return self._config
- def connect(
- self,
- host=None,
- user="root",
- password="taosdata",
- db=None,
- port=0):
- '''
+ def connect(self, host=None, user="root", password="taosdata", db=None, port=0):
+ """
Function to connect to server
@rtype: c_void_p, TDengine handle
- '''
- # host
- try:
- _host = ctypes.c_char_p(host.encode(
- "utf-8")) if host is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("host is expected as a str")
-
- # user
- try:
- _user = ctypes.c_char_p(user.encode("utf-8"))
- except AttributeError:
- raise AttributeError("user is expected as a str")
-
- # password
- try:
- _password = ctypes.c_char_p(password.encode("utf-8"))
- except AttributeError:
- raise AttributeError("password is expected as a str")
-
- # db
- try:
- _db = ctypes.c_char_p(
- db.encode("utf-8")) if db is not None else ctypes.c_char_p(None)
- except AttributeError:
- raise AttributeError("db is expected as a str")
-
- # port
- try:
- _port = ctypes.c_int(port)
- except TypeError:
- raise TypeError("port is expected as an int")
-
- connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect(
- _host, _user, _password, _db, _port))
-
- if connection.value is None:
- print('connect to TDengine failed')
- raise ConnectionError("connect to TDengine failed")
- # sys.exit(1)
- # else:
- # print('connect to TDengine success')
-
- return connection
-
- @staticmethod
- def close(connection):
- '''Close the TDengine handle
- '''
- CTaosInterface.libtaos.taos_close(connection)
- # print('connection is closed')
-
- @staticmethod
- def query(connection, sql):
- '''Run SQL
-
- @sql: str, sql string to run
-
- @rtype: 0 on success and -1 on failure
- '''
- try:
- return CTaosInterface.libtaos.taos_query(
- connection, ctypes.c_char_p(sql.encode('utf-8')))
- except AttributeError:
- raise AttributeError("sql is expected as a string")
- # finally:
- # CTaosInterface.libtaos.close(connection)
-
- @staticmethod
- def affectedRows(result):
- """The affected rows after runing query
- """
- return CTaosInterface.libtaos.taos_affected_rows(result)
-
- @staticmethod
- def subscribe(connection, restart, topic, sql, interval):
- """Create a subscription
- @restart boolean,
- @sql string, sql statement for data query, must be a 'select' statement.
- @topic string, name of this subscription
- """
- return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe(
- connection,
- 1 if restart else 0,
- ctypes.c_char_p(topic.encode('utf-8')),
- ctypes.c_char_p(sql.encode('utf-8')),
- None,
- None,
- interval))
-
- @staticmethod
- def consume(sub):
- """Consume data of a subscription
- """
- result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub))
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.libtaos.taos_num_fields(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
- return result, fields
-
- @staticmethod
- def unsubscribe(sub, keepProgress):
- """Cancel a subscription
- """
- CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
-
- @staticmethod
- def useResult(result):
- '''Use result after calling self.query
- '''
- fields = []
- pfields = CTaosInterface.fetchFields(result)
- for i in range(CTaosInterface.fieldsCount(result)):
- fields.append({'name': pfields[i].name.decode('utf-8'),
- 'bytes': pfields[i].bytes,
- 'type': ord(pfields[i].type)})
-
- return fields
-
- @staticmethod
- def fetchBlock(result, fields):
- pblock = ctypes.c_void_p(0)
- num_of_rows = CTaosInterface.libtaos.taos_fetch_block(
- result, ctypes.byref(pblock))
- if num_of_rows == 0:
- return None, 0
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC_BLOCK:
- raise DatabaseError("Invalid data type returned from database")
- blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
-
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def fetchRow(result, fields):
- pblock = ctypes.c_void_p(0)
- pblock = CTaosInterface.libtaos.taos_fetch_row(result)
- if pblock:
- num_of_rows = 1
- isMicro = (CTaosInterface.libtaos.taos_result_precision(
- result) == FieldType.C_TIMESTAMP_MICRO)
- blocks = [None] * len(fields)
- fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result)
- fieldLen = [
- ele for ele in ctypes.cast(
- fieldL, ctypes.POINTER(
- ctypes.c_int))[
- :len(fields)]]
- for i in range(len(fields)):
- data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i]
- if fields[i]['type'] not in _CONVERT_FUNC:
- raise DatabaseError(
- "Invalid data type returned from database")
- if data is None:
- blocks[i] = [None]
- else:
- blocks[i] = _CONVERT_FUNC[fields[i]['type']](
- data, num_of_rows, fieldLen[i], isMicro)
- else:
- return None, 0
- return blocks, abs(num_of_rows)
-
- @staticmethod
- def freeResult(result):
- CTaosInterface.libtaos.taos_free_result(result)
- result.value = None
-
- @staticmethod
- def fieldsCount(result):
- return CTaosInterface.libtaos.taos_field_count(result)
-
- @staticmethod
- def fetchFields(result):
- return CTaosInterface.libtaos.taos_fetch_fields(result)
-
- # @staticmethod
- # def fetchRow(result, fields):
- # l = []
- # row = CTaosInterface.libtaos.taos_fetch_row(result)
- # if not row:
- # return None
-
- # for i in range(len(fields)):
- # l.append(CTaosInterface.getDataValue(
- # row[i], fields[i]['type'], fields[i]['bytes']))
-
- # return tuple(l)
-
- # @staticmethod
- # def getDataValue(data, dtype, byte):
- # '''
- # '''
- # if not data:
- # return None
-
- # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
- # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
- # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
- # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
- # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
-
- @staticmethod
- def errno(result):
- """Return the error number.
- """
- return CTaosInterface.libtaos.taos_errno(result)
-
- @staticmethod
- def errStr(result):
- """Return the error styring
"""
- return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
+ return taos_connect(host, user, password, db, port)
-if __name__ == '__main__':
+if __name__ == "__main__":
cinter = CTaosInterface()
conn = cinter.connect()
- result = cinter.query(conn, 'show databases')
+ result = taos_query(conn, "show databases")
- print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
+ print("Query Affected rows: {}".format(taos_affected_rows(result)))
- fields = CTaosInterface.useResult(result)
+ fields = taos_fetch_fields(result)
- data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
+ data, num_of_rows = taos_fetch_block(result, fields)
 print(data)
- cinter.freeResult(result)
+ taos_free_result(result)
- cinter.close(conn)
+ taos_close(conn)
diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py
index f6c395342c9c39a24bda6022f0ed36cb7bfe045b..7857c8c706dbe27fd9440e6bf2eb698b6822650e 100644
--- a/src/connector/python/taos/connection.py
+++ b/src/connector/python/taos/connection.py
@@ -1,11 +1,15 @@
-from .cursor import TDengineCursor
-from .subscription import TDengineSubscription
-from .cinterface import CTaosInterface
+# encoding:UTF-8
+from types import FunctionType
+from .cinterface import *
+from .cursor import TaosCursor
+from .subscription import TaosSubscription
+from .statement import TaosStmt
+from .stream import TaosStream
+from .result import *
-class TDengineConnection(object):
- """ TDengine connection object
- """
+class TaosConnection(object):
+ """TDengine connection object"""
def __init__(self, *args, **kwargs):
self._conn = None
@@ -21,55 +25,130 @@ class TDengineConnection(object):
def config(self, **kwargs):
# host
- if 'host' in kwargs:
- self._host = kwargs['host']
+ if "host" in kwargs:
+ self._host = kwargs["host"]
# user
- if 'user' in kwargs:
- self._user = kwargs['user']
+ if "user" in kwargs:
+ self._user = kwargs["user"]
# password
- if 'password' in kwargs:
- self._password = kwargs['password']
+ if "password" in kwargs:
+ self._password = kwargs["password"]
# database
- if 'database' in kwargs:
- self._database = kwargs['database']
+ if "database" in kwargs:
+ self._database = kwargs["database"]
# port
- if 'port' in kwargs:
- self._port = kwargs['port']
+ if "port" in kwargs:
+ self._port = kwargs["port"]
# config
- if 'config' in kwargs:
- self._config = kwargs['config']
+ if "config" in kwargs:
+ self._config = kwargs["config"]
self._chandle = CTaosInterface(self._config)
- self._conn = self._chandle.connect(
- self._host,
- self._user,
- self._password,
- self._database,
- self._port)
+ self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port)
def close(self):
- """Close current connection.
- """
- return CTaosInterface.close(self._conn)
+ """Close current connection."""
+ if self._conn:
+ taos_close(self._conn)
+ self._conn = None
+
+ @property
+ def client_info(self):
+ # type: () -> str
+ return taos_get_client_info()
+
+ @property
+ def server_info(self):
+ # type: () -> str
+ return taos_get_server_info(self._conn)
+
+ def select_db(self, database):
+ # type: (str) -> None
+ taos_select_db(self._conn, database)
+
+ def execute(self, sql):
+ # type: (str) -> None
+ """Simplely execute sql ignoring the results"""
+ res = taos_query(self._conn, sql)
+ taos_free_result(res)
+
+ def query(self, sql):
+ # type: (str) -> TaosResult
+ result = taos_query(self._conn, sql)
+ return TaosResult(result, True, self)
+
+ def query_a(self, sql, callback, param):
+ # type: (str, async_query_callback_type, c_void_p) -> None
+ """Asynchronously query a sql with callback function"""
+ taos_query_a(self._conn, sql, callback, param)
+
+ def subscribe(self, restart, topic, sql, interval, callback=None, param=None):
+ # type: (bool, str, str, int, subscribe_callback_type, c_void_p) -> TaosSubscription
+ """Create a subscription."""
+ if self._conn is None:
+ return None
+ sub = taos_subscribe(self._conn, restart, topic, sql, interval, callback, param)
+ return TaosSubscription(sub, callback is not None)
- def subscribe(self, restart, topic, sql, interval):
- """Create a subscription.
- """
+ def statement(self, sql=None):
+ # type: (str | None) -> TaosStmt
if self._conn is None:
return None
- sub = CTaosInterface.subscribe(
- self._conn, restart, topic, sql, interval)
- return TDengineSubscription(sub)
+ stmt = taos_stmt_init(self._conn)
+ if sql is not None:
+ taos_stmt_prepare(stmt, sql)
+
+ return TaosStmt(stmt)
+
+ def load_table_info(self, tables):
+ # type: (str) -> None
+ taos_load_table_info(self._conn, tables)
+
+ def stream(self, sql, callback, stime=0, param=None, callback2=None):
+ # type: (str, Callable[[Any, TaosResult, TaosRows], None], int, Any, c_void_p) -> TaosStream
+ # cb = cast(callback, stream_callback_type)
+ # ref = byref(cb)
+
+ stream = taos_open_stream(self._conn, sql, callback, stime, param, callback2)
+ return TaosStream(stream)
+
+ def insert_lines(self, lines):
+ # type: (list[str]) -> None
+ """Line protocol and schemaless support
+
+ ## Example
+
+ ```python
+ import taos
+ conn = taos.connect()
+ conn.exec("drop database if exists test")
+ conn.select_db("test")
+ lines = [
+ 'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
+ ]
+ conn.insert_lines(lines)
+ ```
+
+ ## Exception
+
+ ```python
+ try:
+ conn.insert_lines(lines)
+ except LinesError as err:
+ print(err)
+ ```
+ """
+ return taos_insert_lines(self._conn, lines)
def cursor(self):
- """Return a new Cursor object using the connection.
- """
- return TDengineCursor(self)
+ # type: () -> TaosCursor
+ """Return a new Cursor object using the connection."""
+ return TaosCursor(self)
def commit(self):
"""Commit any pending transaction to the database.
@@ -79,17 +158,18 @@ class TDengineConnection(object):
pass
def rollback(self):
- """Void functionality
- """
+ """Void functionality"""
pass
def clear_result_set(self):
- """Clear unused result set on this connection.
- """
+ """Clear unused result set on this connection."""
pass
+ def __del__(self):
+ self.close()
+
if __name__ == "__main__":
- conn = TDengineConnection(host='192.168.1.107')
+ conn = TaosConnection()
conn.close()
print("Hello world")
diff --git a/src/connector/python/taos/constants.py b/src/connector/python/taos/constants.py
index 93466f5184a6bf37c2e1c915a00aa5c5e91d1801..8ad5b69fc099718fa4f4b8c08cf689b17663eae0 100644
--- a/src/connector/python/taos/constants.py
+++ b/src/connector/python/taos/constants.py
@@ -1,12 +1,14 @@
+# encoding:UTF-8
+
"""Constants in TDengine python
"""
-from .dbapi import *
+import ctypes, struct
class FieldType(object):
- """TDengine Field Types
- """
+ """TDengine Field Types"""
+
# type_code
C_NULL = 0
C_BOOL = 1
@@ -34,9 +36,11 @@ class FieldType(object):
C_INT_UNSIGNED_NULL = 4294967295
C_BIGINT_NULL = -9223372036854775808
C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float('nan')
- C_DOUBLE_NULL = float('nan')
- C_BINARY_NULL = bytearray([int('0xff', 16)])
+ C_FLOAT_NULL = ctypes.c_float(struct.unpack(" name (mondatory)
- > type_code (mondatory)
+ > name (mandatory)
+ > type_code (mandatory)
> display_size
> internal_size
> precision
@@ -55,8 +55,7 @@ class TDengineCursor(object):
raise OperationalError("Invalid use of fetch iterator")
if self._block_rows <= self._block_iter:
- block, self._block_rows = CTaosInterface.fetchRow(
- self._result, self._fields)
+ block, self._block_rows = taos_fetch_row(self._result, self._fields)
if self._block_rows == 0:
raise StopIteration
self._block = list(map(tuple, zip(*block)))
@@ -69,20 +68,17 @@ class TDengineCursor(object):
@property
def description(self):
- """Return the description of the object.
- """
+ """Return the description of the object."""
return self._description
@property
def rowcount(self):
- """Return the rowcount of the object
- """
+ """Return the rowcount of the object"""
return self._rowcount
@property
def affected_rows(self):
- """Return the rowcount of insertion
- """
+ """Return the rowcount of insertion"""
return self._affected_rows
def callproc(self, procname, *args):
@@ -96,8 +92,7 @@ class TDengineCursor(object):
self._logfile = logfile
def close(self):
- """Close the cursor.
- """
+ """Close the cursor."""
if self._connection is None:
return False
@@ -107,8 +102,7 @@ class TDengineCursor(object):
return True
def execute(self, operation, params=None):
- """Prepare and execute a database operation (query or command).
- """
+ """Prepare and execute a database operation (query or command)."""
if not operation:
return None
@@ -124,104 +118,91 @@ class TDengineCursor(object):
# global querySeqNum
# querySeqNum += 1
- # localSeqNum = querySeqNum # avoid raice condition
+ # localSeqNum = querySeqNum # avoid race condition
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
- self._result = CTaosInterface.query(self._connection._conn, stmt)
+ self._result = taos_query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
- if (self._logfile):
+ if self._logfile:
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
- if errno == 0:
- if CTaosInterface.fieldsCount(self._result) == 0:
- self._affected_rows += CTaosInterface.affectedRows(
- self._result)
- return CTaosInterface.affectedRows(self._result)
- else:
- self._fields = CTaosInterface.useResult(
- self._result)
- return self._handle_result()
+ if taos_field_count(self._result) == 0:
+ affected_rows = taos_affected_rows(self._result)
+ self._affected_rows += affected_rows
+ return affected_rows
else:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
+ self._fields = taos_fetch_fields(self._result)
+ return self._handle_result()
def executemany(self, operation, seq_of_parameters):
- """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
- """
+ """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters."""
pass
def fetchone(self):
- """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.
- """
+ """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available."""
pass
def fetchmany(self):
pass
def istype(self, col, dataType):
- if (dataType.upper() == "BOOL"):
- if (self._description[col][1] == FieldType.C_BOOL):
+ if dataType.upper() == "BOOL":
+ if self._description[col][1] == FieldType.C_BOOL:
return True
- if (dataType.upper() == "TINYINT"):
- if (self._description[col][1] == FieldType.C_TINYINT):
+ if dataType.upper() == "TINYINT":
+ if self._description[col][1] == FieldType.C_TINYINT:
return True
- if (dataType.upper() == "TINYINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED):
+ if dataType.upper() == "TINYINT UNSIGNED":
+ if self._description[col][1] == FieldType.C_TINYINT_UNSIGNED:
return True
- if (dataType.upper() == "SMALLINT"):
- if (self._description[col][1] == FieldType.C_SMALLINT):
+ if dataType.upper() == "SMALLINT":
+ if self._description[col][1] == FieldType.C_SMALLINT:
return True
- if (dataType.upper() == "SMALLINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED):
+ if dataType.upper() == "SMALLINT UNSIGNED":
+ if self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED:
return True
- if (dataType.upper() == "INT"):
- if (self._description[col][1] == FieldType.C_INT):
+ if dataType.upper() == "INT":
+ if self._description[col][1] == FieldType.C_INT:
return True
- if (dataType.upper() == "INT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_INT_UNSIGNED):
+ if dataType.upper() == "INT UNSIGNED":
+ if self._description[col][1] == FieldType.C_INT_UNSIGNED:
return True
- if (dataType.upper() == "BIGINT"):
- if (self._description[col][1] == FieldType.C_BIGINT):
+ if dataType.upper() == "BIGINT":
+ if self._description[col][1] == FieldType.C_BIGINT:
return True
- if (dataType.upper() == "BIGINT UNSIGNED"):
- if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED):
+ if dataType.upper() == "BIGINT UNSIGNED":
+ if self._description[col][1] == FieldType.C_BIGINT_UNSIGNED:
return True
- if (dataType.upper() == "FLOAT"):
- if (self._description[col][1] == FieldType.C_FLOAT):
+ if dataType.upper() == "FLOAT":
+ if self._description[col][1] == FieldType.C_FLOAT:
return True
- if (dataType.upper() == "DOUBLE"):
- if (self._description[col][1] == FieldType.C_DOUBLE):
+ if dataType.upper() == "DOUBLE":
+ if self._description[col][1] == FieldType.C_DOUBLE:
return True
- if (dataType.upper() == "BINARY"):
- if (self._description[col][1] == FieldType.C_BINARY):
+ if dataType.upper() == "BINARY":
+ if self._description[col][1] == FieldType.C_BINARY:
return True
- if (dataType.upper() == "TIMESTAMP"):
- if (self._description[col][1] == FieldType.C_TIMESTAMP):
+ if dataType.upper() == "TIMESTAMP":
+ if self._description[col][1] == FieldType.C_TIMESTAMP:
return True
- if (dataType.upper() == "NCHAR"):
- if (self._description[col][1] == FieldType.C_NCHAR):
+ if dataType.upper() == "NCHAR":
+ if self._description[col][1] == FieldType.C_NCHAR:
return True
return False
def fetchall_row(self):
- """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
- """
+ """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation."""
if self._result is None or self._fields is None:
raise OperationalError("Invalid use of fetchall")
buffer = [[] for i in range(len(self._fields))]
self._rowcount = 0
while True:
- block, num_of_fields = CTaosInterface.fetchRow(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
+ block, num_of_fields = taos_fetch_row(self._result, self._fields)
+ errno = taos_errno(self._result)
if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
+ raise ProgrammingError(taos_errstr(self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
@@ -230,19 +211,16 @@ class TDengineCursor(object):
return list(map(tuple, zip(*buffer)))
def fetchall(self):
- if self._result is None or self._fields is None:
+ if self._result is None:
raise OperationalError("Invalid use of fetchall")
-
- buffer = [[] for i in range(len(self._fields))]
+ fields = self._fields if self._fields is not None else taos_fetch_fields(self._result)
+ buffer = [[] for i in range(len(fields))]
self._rowcount = 0
while True:
- block, num_of_fields = CTaosInterface.fetchBlock(
- self._result, self._fields)
- errno = CTaosInterface.libtaos.taos_errno(self._result)
+ block, num_of_fields = taos_fetch_block(self._result, fields)
+ errno = taos_errno(self._result)
if errno != 0:
- raise ProgrammingError(
- CTaosInterface.errStr(
- self._result), errno)
+ raise ProgrammingError(taos_errstr(self._result), errno)
if num_of_fields == 0:
break
self._rowcount += num_of_fields
@@ -250,9 +228,12 @@ class TDengineCursor(object):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
+ def stop_query(self):
+ if self._result is not None:
+ taos_stop_query(self._result)
+
def nextset(self):
- """
- """
+ """ """
pass
def setinputsize(self, sizes):
@@ -262,12 +243,11 @@ class TDengineCursor(object):
pass
def _reset_result(self):
- """Reset the result to unused version.
- """
+ """Reset the result to unused version."""
self._description = []
self._rowcount = -1
if self._result is not None:
- CTaosInterface.freeResult(self._result)
+ taos_free_result(self._result)
self._result = None
self._fields = None
self._block = None
@@ -276,11 +256,12 @@ class TDengineCursor(object):
self._affected_rows = 0
def _handle_result(self):
- """Handle the return result from query.
- """
+ """Handle the return result from query."""
self._description = []
for ele in self._fields:
- self._description.append(
- (ele['name'], ele['type'], None, None, None, None, False))
+ self._description.append((ele["name"], ele["type"], None, None, None, None, False))
return self._result
+
+ def __del__(self):
+ self.close()
diff --git a/src/connector/python/taos/dbapi.py b/src/connector/python/taos/dbapi.py
deleted file mode 100644
index 594681ada953abf388e503c23199043cf686e1a3..0000000000000000000000000000000000000000
--- a/src/connector/python/taos/dbapi.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Type Objects and Constructors.
-"""
-
-import time
-import datetime
-
-
-class DBAPITypeObject(object):
- def __init__(self, *values):
- self.values = values
-
- def __com__(self, other):
- if other in self.values:
- return 0
- if other < self.values:
- return 1
- else:
- return -1
-
-
-Date = datetime.date
-Time = datetime.time
-Timestamp = datetime.datetime
-
-
-def DataFromTicks(ticks):
- return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
- return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
- return Timestamp(*time.localtime(ticks)[:6])
-
-
-Binary = bytes
-
-# STRING = DBAPITypeObject(*constants.FieldType.get_string_types())
-# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types())
-# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types())
-# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types())
-# ROWID = DBAPITypeObject()
diff --git a/src/connector/python/taos/error.py b/src/connector/python/taos/error.py
index c584badce8320cd35dc81e8f6b613c56163b1a29..a30adbb162f1c194bdfcf4cca5c43f01107a9776 100644
--- a/src/connector/python/taos/error.py
+++ b/src/connector/python/taos/error.py
@@ -1,66 +1,86 @@
+# encoding:UTF-8
"""Python exceptions
"""
class Error(Exception):
- def __init__(self, msg=None, errno=None):
+ def __init__(self, msg=None, errno=0xffff):
self.msg = msg
- self._full_msg = self.msg
self.errno = errno
+ self._full_msg = "[0x%04x]: %s" % (self.errno & 0xffff, self.msg)
def __str__(self):
return self._full_msg
class Warning(Exception):
- """Exception raised for important warnings like data truncations while inserting.
- """
+ """Exception raised for important warnings like data truncations while inserting."""
+
pass
class InterfaceError(Error):
- """Exception raised for errors that are related to the database interface rather than the database itself.
- """
+ """Exception raised for errors that are related to the database interface rather than the database itself."""
+
pass
class DatabaseError(Error):
- """Exception raised for errors that are related to the database.
- """
+ """Exception raised for errors that are related to the database."""
+
pass
+class ConnectionError(Error):
+ """Exceptin raised for connection failed"""
+ pass
class DataError(DatabaseError):
- """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
- """
+ """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range."""
+
pass
class OperationalError(DatabaseError):
- """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer
- """
+ """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer"""
+
pass
class IntegrityError(DatabaseError):
- """Exception raised when the relational integrity of the database is affected.
- """
+ """Exception raised when the relational integrity of the database is affected."""
+
pass
class InternalError(DatabaseError):
- """Exception raised when the database encounters an internal error.
- """
+ """Exception raised when the database encounters an internal error."""
+
pass
class ProgrammingError(DatabaseError):
- """Exception raised for programming errors.
- """
+ """Exception raised for programming errors."""
+
pass
class NotSupportedError(DatabaseError):
- """Exception raised in case a method or database API was used which is not supported by the database,.
- """
+ """Exception raised in case a method or database API was used which is not supported by the database,."""
+
pass
+
+
+class StatementError(DatabaseError):
+ """Exception raised in STMT API."""
+
+ pass
+
+class ResultError(DatabaseError):
+ """Result related APIs."""
+
+ pass
+
+class LinesError(DatabaseError):
+ """taos_insert_lines errors."""
+
+ pass
\ No newline at end of file
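
With the reworked `Error.__init__`, every exception in this hierarchy now carries a formatted hex error code in its message. A small sketch of what callers observe (the errno value is invented for illustration):

```python
from taos.error import OperationalError

try:
    raise OperationalError("invalid SQL", 0x0216)  # hypothetical errno
except OperationalError as err:
    print(err)        # [0x0216]: invalid SQL
    print(err.errno)  # 534
```
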
diff --git a/src/connector/python/taos/field.py b/src/connector/python/taos/field.py
new file mode 100644
index 0000000000000000000000000000000000000000..445cd8afdba6f2512c73be95c9b0dbd8dc00da8a
--- /dev/null
+++ b/src/connector/python/taos/field.py
@@ -0,0 +1,302 @@
+# encoding:UTF-8
+import ctypes
+import math
+import datetime
+from ctypes import *
+
+from .constants import FieldType
+from .error import *
+
+_datetime_epoch = datetime.datetime.fromtimestamp(0)
+
+def _convert_millisecond_to_datetime(milli):
+ return _datetime_epoch + datetime.timedelta(seconds=milli / 1000.0)
+
+
+def _convert_microsecond_to_datetime(micro):
+ return _datetime_epoch + datetime.timedelta(seconds=micro / 1000000.0)
+
+
+def _convert_nanosecond_to_datetime(nanosec):
+ # Python datetime has no nanosecond precision, so nanosecond timestamps are kept as raw int64 values.
+ return nanosec
+
+
+def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C bool row to python row"""
+ _timestamp_converter = _convert_millisecond_to_datetime
+ if precision == FieldType.C_TIMESTAMP_MILLI:
+ _timestamp_converter = _convert_millisecond_to_datetime
+ elif precision == FieldType.C_TIMESTAMP_MICRO:
+ _timestamp_converter = _convert_microsecond_to_datetime
+ elif precision == FieldType.C_TIMESTAMP_NANO:
+ _timestamp_converter = _convert_nanosecond_to_datetime
+ else:
+ raise DatabaseError("Unknown precision returned from database")
+
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele)
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_bool_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C bool row to python row"""
+ return [
+ None if ele == FieldType.C_BOOL_NULL else bool(ele)
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C tinyint row to python row"""
+ return [
+ None if ele == FieldType.C_TINYINT_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_tinyint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C tinyint row to python row"""
+ return [
+ None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ubyte))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_smallint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C smallint row to python row"""
+ return [
+ None if ele == FieldType.C_SMALLINT_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_smallint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C smallint row to python row"""
+ return [
+ None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_ushort))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_int_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C int row to python row"""
+ return [
+ None if ele == FieldType.C_INT_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C int row to python row"""
+ return [
+ None if ele == FieldType.C_INT_UNSIGNED_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_bigint_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C bigint row to python row"""
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_bigint_unsigned_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C bigint row to python row"""
+ return [
+ None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_uint64))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_float_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C float row to python row"""
+ return [
+ None if math.isnan(ele) else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_double_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C double row to python row"""
+ return [
+ None if math.isnan(ele) else ele
+ for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_binary_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C binary row to python row"""
+ assert nbytes is not None
+ return [
+ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode("utf-8")
+ for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[: abs(num_of_rows)]
+ ]
+
+
+def _crow_nchar_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C nchar row to python row"""
+ assert nbytes is not None
+ res = []
+ for i in range(abs(num_of_rows)):
+ try:
+ if num_of_rows >= 0:
+ tmpstr = ctypes.c_char_p(data)
+ res.append(tmpstr.value.decode())
+ else:
+ res.append(
+ (
+ ctypes.cast(
+ data + nbytes * i,
+ ctypes.POINTER(ctypes.c_wchar * (nbytes // 4)),
+ )
+ )[0].value
+ )
+ except ValueError:
+ res.append(None)
+
+ return res
+
+
+def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C binary row to python row"""
+ assert nbytes is not None
+ res = []
+ for i in range(abs(num_of_rows)):
+ try:
+ rbyte = ctypes.cast(data + nbytes * i, ctypes.POINTER(ctypes.c_short))[:1].pop()
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode()[0:rbyte])
+ except ValueError:
+ res.append(None)
+ return res
+
+
+def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN):
+ """Function to convert C nchar row to python row"""
+ assert nbytes is not None
+ res = []
+ for i in range(abs(num_of_rows)):
+ try:
+ tmpstr = ctypes.c_char_p(data + nbytes * i + 2)
+ res.append(tmpstr.value.decode())
+ except ValueError:
+ res.append(None)
+ return res
+
+
+CONVERT_FUNC = {
+ FieldType.C_BOOL: _crow_bool_to_python,
+ FieldType.C_TINYINT: _crow_tinyint_to_python,
+ FieldType.C_SMALLINT: _crow_smallint_to_python,
+ FieldType.C_INT: _crow_int_to_python,
+ FieldType.C_BIGINT: _crow_bigint_to_python,
+ FieldType.C_FLOAT: _crow_float_to_python,
+ FieldType.C_DOUBLE: _crow_double_to_python,
+ FieldType.C_BINARY: _crow_binary_to_python,
+ FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
+ FieldType.C_NCHAR: _crow_nchar_to_python,
+ FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
+ FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
+ FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
+ FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
+}
+
+CONVERT_FUNC_BLOCK = {
+ FieldType.C_BOOL: _crow_bool_to_python,
+ FieldType.C_TINYINT: _crow_tinyint_to_python,
+ FieldType.C_SMALLINT: _crow_smallint_to_python,
+ FieldType.C_INT: _crow_int_to_python,
+ FieldType.C_BIGINT: _crow_bigint_to_python,
+ FieldType.C_FLOAT: _crow_float_to_python,
+ FieldType.C_DOUBLE: _crow_double_to_python,
+ FieldType.C_BINARY: _crow_binary_to_python_block,
+ FieldType.C_TIMESTAMP: _crow_timestamp_to_python,
+ FieldType.C_NCHAR: _crow_nchar_to_python_block,
+ FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python,
+ FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python,
+ FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python,
+ FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python,
+}
+
+# Corresponding TAOS_FIELD structure in C
+
+
+class TaosField(ctypes.Structure):
+ _fields_ = [
+ ("_name", ctypes.c_char * 65),
+ ("_type", ctypes.c_uint8),
+ ("_bytes", ctypes.c_uint16),
+ ]
+
+ @property
+ def name(self):
+ return self._name.decode("utf-8")
+
+ @property
+ def length(self):
+ """alias to self.bytes"""
+ return self._bytes
+
+ @property
+ def bytes(self):
+ return self._bytes
+
+ @property
+ def type(self):
+ return self._type
+
+ def __dict__(self):
+ return {"name": self.name, "type": self.type, "bytes": self.length}
+
+ def __str__(self):
+ return "{name: %s, type: %d, bytes: %d}" % (self.name, self.type, self.length)
+
+ def __getitem__(self, item):
+ return getattr(self, item)
+
+
+class TaosFields(object):
+ def __init__(self, fields, count):
+ if isinstance(fields, c_void_p):
+ self._fields = cast(fields, POINTER(TaosField))
+ if isinstance(fields, POINTER(TaosField)):
+ self._fields = fields
+ self._count = count
+ self._iter = 0
+
+ def as_ptr(self):
+ return self._fields
+
+ @property
+ def count(self):
+ return self._count
+
+ @property
+ def fields(self):
+ return self._fields
+
+ def __next__(self):
+ return self._next_field()
+
+ def next(self):
+ return self._next_field()
+
+ def _next_field(self):
+ if self._iter < self.count:
+ field = self._fields[self._iter]
+ self._iter += 1
+ return field
+ else:
+ raise StopIteration
+
+ def __getitem__(self, item):
+ return self._fields[item]
+
+ def __iter__(self):
+ return self
+
+ def __len__(self):
+ return self.count
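
The `CONVERT_FUNC` tables are the core of row decoding: each TDengine type code maps to a converter that casts the raw column pointer to the matching ctypes type and turns the NULL sentinel into `None`. A self-contained sketch of that dispatch on a hand-built buffer (the buffer is fabricated for illustration, not read from a live result set):

```python
import ctypes

from taos.constants import FieldType
from taos.field import CONVERT_FUNC

# Fabricated column of three C ints; the middle one holds the INT NULL sentinel.
raw = (ctypes.c_int * 3)(7, FieldType.C_INT_NULL, 42)
data = ctypes.cast(raw, ctypes.c_void_p)

decode = CONVERT_FUNC[FieldType.C_INT]  # dispatches to _crow_int_to_python
print(decode(data, 3))                  # [7, None, 42]
```
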
diff --git a/src/connector/python/taos/precision.py b/src/connector/python/taos/precision.py
new file mode 100644
index 0000000000000000000000000000000000000000..d67da592cce6d2121ec8f2eed78a30d6fa0c446b
--- /dev/null
+++ b/src/connector/python/taos/precision.py
@@ -0,0 +1,12 @@
+class PrecisionEnum(object):
+ """Precision enums"""
+
+ Milliseconds = 0
+ Microseconds = 1
+ Nanoseconds = 2
+
+
+class PrecisionError(Exception):
+ """Python datetime does not support nanoseconds error"""
+
+ pass
diff --git a/src/connector/python/taos/result.py b/src/connector/python/taos/result.py
new file mode 100644
index 0000000000000000000000000000000000000000..81151733615d1b7fdc3318b6e53888ae39d32b14
--- /dev/null
+++ b/src/connector/python/taos/result.py
@@ -0,0 +1,245 @@
+from .cinterface import *
+
+# from .connection import TaosConnection
+from .error import *
+
+
+class TaosResult(object):
+ """TDengine result interface"""
+
+ def __init__(self, result, close_after=False, conn=None):
+ # type: (c_void_p, bool, TaosConnection) -> None
+ # to make the __del__ order right
+ self._conn = conn
+ self._close_after = close_after
+ self._result = result
+ self._fields = None
+ self._field_count = None
+ self._precision = None
+
+ self._block = None
+ self._block_length = None
+ self._row_count = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next_row()
+
+ def next(self):
+ # fetch next row
+ return self._next_row()
+
+ def _next_row(self):
+ if self._result is None or self.fields is None:
+ raise OperationalError("Invalid use of fetch iterator")
+
+ if self._block is None or self._block_iter >= self._block_length:
+ self._block, self._block_length = self.fetch_block()
+ self._block_iter = 0
+ # self._row_count += self._block_length
+
+ raw = self._block[self._block_iter]
+ self._block_iter += 1
+ return raw
+
+ @property
+ def fields(self):
+ """fields definitions of the current result"""
+ if self._result is None:
+ raise ResultError("no result object setted")
+ if self._fields is None:
+ self._fields = taos_fetch_fields(self._result)
+
+ return self._fields
+
+ @property
+ def field_count(self):
+ """Field count of the current result, eq to taos_field_count(result)"""
+ return self.fields.count
+
+ @property
+ def row_count(self):
+ """Return the rowcount of the object"""
+ return self._row_count
+
+ @property
+ def precision(self):
+ if self._precision is None:
+ self._precision = taos_result_precision(self._result)
+ return self._precision
+
+ @property
+ def affected_rows(self):
+ return taos_affected_rows(self._result)
+
+ # @property
+ def field_lengths(self):
+ return taos_fetch_lengths(self._result, self.field_count)
+
+ def rows_iter(self, num_of_rows=None):
+ return TaosRows(self, num_of_rows)
+
+ def blocks_iter(self):
+ return TaosBlocks(self)
+
+ def fetch_block(self):
+ if self._result is None:
+ raise OperationalError("Invalid use of fetch iterator")
+
+ block, length = taos_fetch_block_raw(self._result)
+ if length == 0:
+ raise StopIteration
+ precision = self.precision
+ field_count = self.field_count
+ fields = self.fields
+ blocks = [None] * field_count
+ lengths = self.field_lengths()
+ for i in range(field_count):
+ data = ctypes.cast(block, ctypes.POINTER(ctypes.c_void_p))[i]
+ if fields[i].type not in CONVERT_FUNC_BLOCK:
+ raise DatabaseError("Invalid data type returned from database")
+ blocks[i] = CONVERT_FUNC_BLOCK[fields[i].type](data, length, lengths[i], precision)
+
+ return list(map(tuple, zip(*blocks))), length
+
+ def fetch_all(self):
+ if self._result is None:
+ raise OperationalError("Invalid use of fetchall")
+
+ if self._fields is None:
+ self._fields = taos_fetch_fields(self._result)
+ buffer = [[] for i in range(len(self._fields))]
+ self._row_count = 0
+ while True:
+ block, num_of_fields = taos_fetch_block(self._result, self._fields)
+ errno = taos_errno(self._result)
+ if errno != 0:
+ raise ProgrammingError(taos_errstr(self._result), errno)
+ if num_of_fields == 0:
+ break
+ self._row_count += num_of_fields
+ for i in range(len(self._fields)):
+ buffer[i].extend(block[i])
+ return list(map(tuple, zip(*buffer)))
+
+ def fetch_rows_a(self, callback, param):
+ taos_fetch_rows_a(self._result, callback, param)
+
+ def stop_query(self):
+ return taos_stop_query(self._result)
+
+ def errno(self):
+ """**DO NOT** use this directly unless you know what you are doing"""
+ return taos_errno(self._result)
+
+ def errstr(self):
+ return taos_errstr(self._result)
+
+ def check_error(self, errno=None, close=True):
+ if errno is None:
+ errno = self.errno()
+ if errno != 0:
+ msg = self.errstr()
+ if close:
+ self.close()
+ raise OperationalError(msg, errno)
+
+ def close(self):
+ """free result object."""
+ if self._result is not None and self._close_after:
+ taos_free_result(self._result)
+ self._result = None
+ self._fields = None
+ self._field_count = None
+ self._field_lengths = None
+
+ def __del__(self):
+ self.close()
+
+
+class TaosRows:
+ """TDengine result rows iterator"""
+
+ def __init__(self, result, num_of_rows=None):
+ self._result = result
+ self._num_of_rows = num_of_rows
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next_row()
+
+ def next(self):
+ return self._next_row()
+
+ def _next_row(self):
+ if self._result is None:
+ raise OperationalError("Invalid use of fetch iterator")
+ if self._num_of_rows is not None and self._num_of_rows <= self._result._row_count:
+ raise StopIteration
+
+ row = taos_fetch_row_raw(self._result._result)
+ if not row:
+ raise StopIteration
+ self._result._row_count += 1
+ return TaosRow(self._result, row)
+
+ @property
+ def row_count(self):
+ """Return the rowcount of the object"""
+ return self._result._row_count
+
+
+class TaosRow:
+ def __init__(self, result, row):
+ self._result = result
+ self._row = row
+
+ def __str__(self):
+ return taos_print_row(self._row, self._result.fields, self._result.field_count)
+
+ def __call__(self):
+ return self.as_tuple()
+
+ def _astuple(self):
+ return self.as_tuple()
+
+ def __iter__(self):
+ # __iter__ must return an iterator, not the tuple itself
+ return iter(self.as_tuple())
+
+ def as_ptr(self):
+ return self._row
+
+ def as_tuple(self):
+ precision = self._result.precision
+ field_count = self._result.field_count
+ blocks = [None] * field_count
+ fields = self._result.fields
+ field_lens = self._result.field_lengths()
+ for i in range(field_count):
+ data = ctypes.cast(self._row, ctypes.POINTER(ctypes.c_void_p))[i]
+ if fields[i].type not in CONVERT_FUNC:
+ raise DatabaseError("Invalid data type returned from database")
+ if data is None:
+ blocks[i] = None
+ else:
+ blocks[i] = CONVERT_FUNC[fields[i].type](data, 1, field_lens[i], precision)[0]
+ return tuple(blocks)
+
+
+class TaosBlocks:
+ """TDengine result blocks iterator"""
+
+ def __init__(self, result):
+ self._result = result
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._result.fetch_block()
+
+ def next(self):
+ return self._result.fetch_block()
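
`TaosResult` now supports three fetch styles: row-at-a-time (`rows_iter`, one `taos_fetch_row_raw` call per row), block-at-a-time (`blocks_iter`, one `taos_fetch_block_raw` call per block), and the bulk `fetch_all`. A short sketch contrasting the first two, assuming a server that has the default `log` monitoring database:

```python
import taos

conn = taos.connect(database="log")  # assumes the default monitoring db exists

result = conn.query("select * from log limit 30")
for row in result.rows_iter():       # decodes one row per C fetch call
    print(row)
result.close()

result = conn.query("select * from log limit 30")
for rows, length in result.blocks_iter():  # decodes a whole block per call
    print("block of %d rows" % length)
result.close()

conn.close()
```
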
diff --git a/src/connector/python/taos/statement.py b/src/connector/python/taos/statement.py
new file mode 100644
index 0000000000000000000000000000000000000000..155e98173b7f920640aa84d0fcda618d2669bb1e
--- /dev/null
+++ b/src/connector/python/taos/statement.py
@@ -0,0 +1,85 @@
+from taos.cinterface import *
+from taos.error import *
+from taos.result import *
+
+
+class TaosStmt(object):
+ """TDengine STMT interface"""
+
+ def __init__(self, stmt, conn=None):
+ self._conn = conn
+ self._stmt = stmt
+
+ def set_tbname(self, name):
+ """Set table name if needed.
+
+ Note that the set_tbname* methods should only be used in insert statements.
+ """
+ if self._stmt is None:
+ raise StatementError("Invalid use of set_tbname")
+ taos_stmt_set_tbname(self._stmt, name)
+
+ def prepare(self, sql):
+ # type: (str) -> None
+ taos_stmt_prepare(self._stmt, sql)
+
+ def set_tbname_tags(self, name, tags):
+ # type: (str, Array[TaosBind]) -> None
+ """Set table name with tags, tags is array of BindParams"""
+ if self._stmt is None:
+ raise StatementError("Invalid use of set_tbname")
+ taos_stmt_set_tbname_tags(self._stmt, name, tags)
+
+ def bind_param(self, params, add_batch=True):
+ # type: (Array[TaosBind], bool) -> None
+ if self._stmt is None:
+ raise StatementError("Invalid use of stmt")
+ taos_stmt_bind_param(self._stmt, params)
+ if add_batch:
+ taos_stmt_add_batch(self._stmt)
+
+ def bind_param_batch(self, binds, add_batch=True):
+ # type: (Array[TaosMultiBind], bool) -> None
+ if self._stmt is None:
+ raise StatementError("Invalid use of stmt")
+ taos_stmt_bind_param_batch(self._stmt, binds)
+ if add_batch:
+ taos_stmt_add_batch(self._stmt)
+
+ def add_batch(self):
+ if self._stmt is None:
+ raise StatementError("Invalid use of stmt")
+ taos_stmt_add_batch(self._stmt)
+
+ def execute(self):
+ if self._stmt is None:
+ raise StatementError("Invalid use of execute")
+ taos_stmt_execute(self._stmt)
+
+ def use_result(self):
+ result = taos_stmt_use_result(self._stmt)
+ return TaosResult(result)
+
+ def close(self):
+ """Close stmt."""
+ if self._stmt is None:
+ return
+ taos_stmt_close(self._stmt)
+ self._stmt = None
+
+ def __del__(self):
+ self.close()
+
+
+if __name__ == "__main__":
+ from taos.connection import TaosConnection
+
+ conn = TaosConnection()
+
+ stmt = conn.statement("select * from log.log limit 10")
+ stmt.execute()
+ result = stmt.use_result()
+ for row in result:
+ print(row)
+ stmt.close()
+ conn.close()
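
The `__main__` demo above only runs a query; in practice the statement object is mostly used for bind-parameter inserts. A condensed sketch using `new_bind_params` and `PrecisionEnum` (the same API the test files below exercise at length):

```python
from taos import connect, new_bind_params, PrecisionEnum

conn = connect(database="demo")  # assumes a prepared demo database
conn.execute("create table if not exists t(ts timestamp, n int)")

stmt = conn.statement("insert into t values(?, ?)")
params = new_bind_params(2)
params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
params[1].int(1)
stmt.bind_param(params)  # binds and, by default, also adds the batch
stmt.execute()
stmt.close()
conn.close()
```
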
diff --git a/src/connector/python/taos/stream.py b/src/connector/python/taos/stream.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe3c8c85e3279511972293882224bf20c30dfa64
--- /dev/null
+++ b/src/connector/python/taos/stream.py
@@ -0,0 +1,22 @@
+from taos.cinterface import *
+from taos.error import *
+from taos.result import *
+
+
+class TaosStream(object):
+ """TDengine Stream interface"""
+
+ def __init__(self, stream):
+ self._raw = stream
+
+ def as_ptr(self):
+ return self._raw
+
+ def close(self):
+ """Close stmt."""
+ if self._raw is not None:
+ taos_close_stream(self._raw)
+ self._raw = None
+
+ def __del__(self):
+ self.close()
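
`TaosStream` itself is only a thin RAII wrapper; streams are opened through `conn.stream`, as `test_stream.py` below demonstrates. A condensed sketch, assuming a database with a `log(ts timestamp, n int)` table and the default `param` argument:

```python
from ctypes import c_void_p
from taos import connect, TaosResult, TaosRow

def callback(p_param, p_result, p_row):
    # type: (c_void_p, c_void_p, c_void_p) -> None
    if p_result is None or p_row is None:
        return
    result = TaosResult(p_result)
    print(TaosRow(result, p_row)())  # decode and print the window's aggregate row

conn = connect(database="demo")  # assumes a prepared demo database
stream = conn.stream("select count(*) from log interval(5s)", callback)
# ... inserts arrive; the callback fires once per 5s window ...
stream.close()
conn.close()
```
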
diff --git a/src/connector/python/taos/subscription.py b/src/connector/python/taos/subscription.py
index 270d9de09217fc58a389981a3542698dd1c0428a..3c6958b6f8d55791b9753a84a4bbd7653bdae780 100644
--- a/src/connector/python/taos/subscription.py
+++ b/src/connector/python/taos/subscription.py
@@ -1,49 +1,41 @@
-from .cinterface import CTaosInterface
+from taos.result import TaosResult
+from .cinterface import *
from .error import *
-class TDengineSubscription(object):
- """TDengine subscription object
- """
+class TaosSubscription(object):
+ """TDengine subscription object"""
- def __init__(self, sub):
+ def __init__(self, sub, with_callback=False):
self._sub = sub
+ self._with_callback = with_callback
def consume(self):
- """Consume rows of a subscription
- """
+ """Consume rows of a subscription"""
if self._sub is None:
raise OperationalError("Invalid use of consume")
-
- result, fields = CTaosInterface.consume(self._sub)
- buffer = [[] for i in range(len(fields))]
- while True:
- block, num_of_fields = CTaosInterface.fetchBlock(result, fields)
- if num_of_fields == 0:
- break
- for i in range(len(fields)):
- buffer[i].extend(block[i])
-
- self.fields = fields
- return list(map(tuple, zip(*buffer)))
+ if self._with_callback:
+ raise OperationalError("DONOT use consume method in an subscription with callback")
+ result = taos_consume(self._sub)
+ return TaosResult(result)
def close(self, keepProgress=True):
- """Close the Subscription.
- """
+ """Close the Subscription."""
if self._sub is None:
return False
- CTaosInterface.unsubscribe(self._sub, keepProgress)
+ taos_unsubscribe(self._sub, keepProgress)
+ self._sub = None
return True
+
+ def __del__(self):
+ self.close()
+
+if __name__ == "__main__":
+ from .connection import TaosConnection
-if __name__ == '__main__':
- from .connection import TDengineConnection
- conn = TDengineConnection(
- host="127.0.0.1",
- user="root",
- password="taosdata",
- database="test")
+ conn = TaosConnection(host="127.0.0.1", user="root", password="taosdata", database="test")
# Generate a cursor object to run SQL commands
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
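
The behavioral change worth noting: `consume` no longer materializes a list of tuples up front but returns a lazily iterated `TaosResult`. A sketch of the new contract, continuing the `__main__` setup above:

```python
import taos

conn = taos.connect(database="test")  # assumes the database created for the demo
sub = conn.subscribe(True, "test", "select * from meters;", 1000)

result = sub.consume()  # now a TaosResult, not a pre-built list
for row in result:      # rows are decoded on demand
    print(row)

sub.close()
conn.close()
```
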
diff --git a/src/connector/python/taos/timestamp.py b/src/connector/python/taos/timestamp.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab5679fdf12e2942aa94f76716ff98e6d2a88d69
--- /dev/null
+++ b/src/connector/python/taos/timestamp.py
@@ -0,0 +1,17 @@
+
+class TimestampType(object):
+ """Choose which type that parsing TDengine timestamp data to
+
+ - DATETIME: use python datetime.datetime, note that it does not support nanosecond precision,
+ and python taos will use raw c_int64 as a fallback for nanosecond results.
+ - NUMPY: use numpy.datetime64 type.
+ - RAW: use raw c_int64.
+ - TAOS: use taos' TaosTimestamp.
+ """
+ DATETIME = 0
+ NUMPY = 1
+ RAW = 2
+ TAOS = 3
+
+class TaosTimestamp:
+ pass
diff --git a/src/connector/python/tests/test-td6231.py b/src/connector/python/tests/test-td6231.py
new file mode 100644
index 0000000000000000000000000000000000000000..e55d22c10734eedcbd5be8012eaeb3fb3d51e381
--- /dev/null
+++ b/src/connector/python/tests/test-td6231.py
@@ -0,0 +1,50 @@
+from taos import *
+
+conn = connect()
+
+dbname = "pytest_taos_stmt_multi"
+conn.execute("drop database if exists %s" % dbname)
+conn.execute("create database if not exists %s" % dbname)
+conn.select_db(dbname)
+
+conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, \
+ ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \
+ su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+)
+
+stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+params = new_multi_binds(16)
+params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+params[1].bool((True, None, False))
+params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+params[3].tinyint([0, 127, None])
+params[4].smallint([3, None, 2])
+params[5].int([3, 4, None])
+params[6].bigint([3, 4, None])
+params[7].tinyint_unsigned([3, 4, None])
+params[8].smallint_unsigned([3, 4, None])
+params[9].int_unsigned([3, 4, None])
+params[10].bigint_unsigned([3, 4, None])
+params[11].float([3, None, 1])
+params[12].double([3, None, 1.2])
+params[13].binary(["abc", "dddafadfadfadfadfa", None])
+# params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+params[14].nchar([None, None, None])
+params[15].timestamp([None, None, 1626861392591])
+stmt.bind_param_batch(params)
+stmt.execute()
+
+
+result = stmt.use_result()
+assert result.affected_rows == 3
+result.close()
+
+result = conn.query("select * from log")
+for row in result:
+ print(row)
+result.close()
+stmt.close()
+conn.close()
diff --git a/src/connector/python/tests/test_ctaos.py b/src/connector/python/tests/test_ctaos.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b9566931f2b29dcbdc8646d2f087ebf40e716cc
--- /dev/null
+++ b/src/connector/python/tests/test_ctaos.py
@@ -0,0 +1,162 @@
+from taos.cinterface import *
+from taos.precision import *
+from taos.bind import *
+
+import time
+import datetime
+import pytest
+
+@pytest.fixture
+def conn():
+ return CTaosInterface().connect()
+
+
+def test_simple(conn, caplog):
+ dbname = "pytest_ctaos_simple"
+ try:
+ res = taos_query(conn, "create database if not exists %s" % dbname)
+ taos_free_result(res)
+
+ taos_select_db(conn, dbname)
+
+ res = taos_query(
+ conn,
+ "create table if not exists log(ts timestamp, level tinyint, content binary(100), ipaddr binary(134))",
+ )
+ taos_free_result(res)
+
+ res = taos_query(conn, "insert into log values(now, 1, 'hello', 'test')")
+ taos_free_result(res)
+
+ res = taos_query(conn, "select level,content,ipaddr from log limit 1")
+
+ fields = taos_fetch_fields_raw(res)
+ field_count = taos_field_count(res)
+
+ fields = taos_fetch_fields(res)
+ for field in fields:
+ print(field)
+
+ # field_lengths = taos_fetch_lengths(res, field_count)
+ # if not field_lengths:
+ # raise "fetch lengths error"
+
+ row = taos_fetch_row_raw(res)
+ rowstr = taos_print_row(row, fields, field_count)
+ assert rowstr == "1 hello test"
+
+ row, num = taos_fetch_row(res, fields)
+ print(row)
+ taos_free_result(res)
+ taos_query(conn, "drop database if exists " + dbname)
+ taos_close(conn)
+ except Exception as err:
+ taos_query(conn, "drop database if exists " + dbname)
+ raise err
+
+
+def test_stmt(conn, caplog):
+ dbname = "pytest_ctaos_stmt"
+ try:
+ res = taos_query(conn, "drop database if exists %s" % dbname)
+ taos_free_result(res)
+ res = taos_query(conn, "create database if not exists %s" % dbname)
+ taos_free_result(res)
+
+ taos_select_db(conn, dbname)
+
+ res = taos_query(
+ conn,
+ "create table if not exists log(ts timestamp, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100))",
+ )
+ taos_free_result(res)
+
+ stmt = taos_stmt_init(conn)
+
+ taos_stmt_prepare(stmt, "insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+
+ params = new_bind_params(14)
+ params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+ params[1].null()
+ params[2].tinyint(2)
+ params[3].smallint(3)
+ params[4].int(4)
+ params[5].bigint(5)
+ params[6].tinyint_unsigned(6)
+ params[7].smallint_unsigned(7)
+ params[8].int_unsigned(8)
+ params[9].bigint_unsigned(9)
+ params[10].float(10.1)
+ params[11].double(10.11)
+ params[12].binary("hello")
+ params[13].nchar("stmt")
+ taos_stmt_bind_param(stmt, params)
+ taos_stmt_add_batch(stmt)
+ taos_stmt_execute(stmt)
+
+ res = taos_query(conn, "select * from log limit 1")
+
+ fields = taos_fetch_fields(res)
+ field_count = taos_field_count(res)
+
+ row = taos_fetch_row_raw(res)
+ rowstr = taos_print_row(row, fields, field_count, 100)
+
+ taos_free_result(res)
+ taos_query(conn, "drop database if exists " + dbname)
+ taos_close(conn)
+
+ assert rowstr == "1626861392589 NULL 2 3 4 5 6 7 8 9 10.100000 10.110000 hello stmt"
+ except Exception as err:
+ taos_query(conn, "drop database if exists " + dbname)
+ raise err
+
+def stream_callback(param, result, row):
+ # type: (c_void_p, c_void_p, c_void_p) -> None
+ try:
+ if result is None or row is None:
+ return
+ result = c_void_p(result)
+ row = c_void_p(row)
+ fields = taos_fetch_fields_raw(result)
+ num_fields = taos_field_count(result)
+ s = taos_print_row(row, fields, num_fields)
+ print(s)
+ taos_stop_query(result)
+ except Exception as err:
+ print(err)
+
+def test_stream(conn, caplog):
+ dbname = "pytest_ctaos_stream"
+ try:
+ res = taos_query(conn, "create database if not exists %s" % dbname)
+ taos_free_result(res)
+
+ taos_select_db(conn, dbname)
+
+ res = taos_query(
+ conn,
+ "create table if not exists log(ts timestamp, n int)",
+ )
+ taos_free_result(res)
+
+ res = taos_query(conn, "select count(*) from log interval(5s)")
+ cc = taos_num_fields(res)
+ assert cc == 2
+
+ stream = taos_open_stream(conn, "select count(*) from log interval(5s)", stream_callback, 0, None, None)
+ print("waiting for data")
+ time.sleep(1)
+
+ for i in range(0, 2):
+ res = taos_query(conn, "insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
+ taos_free_result(res)
+ time.sleep(2)
+ taos_close_stream(stream)
+ taos_query(conn, "drop database if exists " + dbname)
+ taos_close(conn)
+ except Exception as err:
+ taos_query(conn, "drop database if exists " + dbname)
+ raise err
diff --git a/src/connector/python/tests/test_info.py b/src/connector/python/tests/test_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..bddfec7ef9ddbc203adfcadd262839048466592c
--- /dev/null
+++ b/src/connector/python/tests/test_info.py
@@ -0,0 +1,23 @@
+from taos.cinterface import *
+
+from taos import *
+
+import pytest
+
+@pytest.fixture
+def conn():
+ return connect()
+
+def test_client_info():
+ print(taos_get_client_info())
+
+def test_server_info(conn):
+ # type: (TaosConnection) -> None
+ print(conn.client_info)
+ print(conn.server_info)
+
+if __name__ == "__main__":
+ test_client_info()
+ test_server_info(connect())
diff --git a/src/connector/python/tests/test_lines.py b/src/connector/python/tests/test_lines.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd9d2cdb39d6f4f2612581ce7284c057c456ef91
--- /dev/null
+++ b/src/connector/python/tests/test_lines.py
@@ -0,0 +1,57 @@
+from taos.error import OperationalError
+from taos import connect, new_bind_params, PrecisionEnum
+from taos import *
+
+from ctypes import *
+import taos
+import pytest
+
+
+@pytest.fixture
+def conn():
+ # type: () -> taos.TaosConnection
+ return connect()
+
+
+def test_insert_lines(conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_insert_lines"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s precision 'us'" % dbname)
+ conn.select_db(dbname)
+
+ lines = [
+ 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns',
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns',
+ 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+ ]
+ conn.insert_lines(lines)
+ print("inserted")
+
+ lines = [
+ 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000ns',
+ ]
+ conn.insert_lines(lines)
+ print("inserted")
+ result = conn.query("select * from st")
+ print(*result.fields)
+ rows = result.rows_iter()
+ for row in rows:
+ print(row)
+ result.close()
+ print(result.row_count)
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ print(err)
+ raise err
+
+
+if __name__ == "__main__":
+ test_insert_lines(connect())
diff --git a/src/connector/python/tests/test_query.py b/src/connector/python/tests/test_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e139b1f14df29e8b6304dd2ca03519ea274f43
--- /dev/null
+++ b/src/connector/python/tests/test_query.py
@@ -0,0 +1,43 @@
+from datetime import datetime
+import taos
+import pytest
+
+@pytest.fixture
+def conn():
+ return taos.connect()
+
+def test_query(conn):
+ """This test will use fetch_block for rows fetching, significantly faster than rows_iter"""
+ result = conn.query("select * from log.log limit 10000")
+ fields = result.fields
+ for field in fields:
+ print("field: %s" % field)
+ start = datetime.now()
+ for row in result:
+ # print(row)
+ pass
+ end = datetime.now()
+ elapsed = end - start
+ print("elapsed time: ", elapsed)
+ result.close()
+ conn.close()
+
+def test_query_row_iter(conn):
+ """This test will use fetch_row for each row fetching, this is the only way in async callback"""
+ result = conn.query("select * from log.log limit 10000")
+ fields = result.fields
+ for field in fields:
+ print("field: %s" % field)
+ start = datetime.now()
+ for row in result.rows_iter():
+ # print(row)
+ pass
+ end = datetime.now()
+ elapsed = end - start
+ print("elapsed time: ", elapsed)
+ result.close()
+ conn.close()
+
+if __name__ == "__main__":
+ test_query(taos.connect(database = "log"))
+ test_query_row_iter(taos.connect(database = "log"))
diff --git a/src/connector/python/tests/test_query_a.py b/src/connector/python/tests/test_query_a.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b4be5695a87f1fd1017435b13983df7c4f70f06
--- /dev/null
+++ b/src/connector/python/tests/test_query_a.py
@@ -0,0 +1,66 @@
+from taos import *
+from ctypes import *
+import taos
+import pytest
+import time
+
+
+@pytest.fixture
+def conn():
+ return taos.connect()
+
+def fetch_callback(p_param, p_result, num_of_rows):
+ print("fetched ", num_of_rows, "rows")
+ p = cast(p_param, POINTER(Counter))
+ result = TaosResult(p_result)
+
+ if num_of_rows == 0:
+ print("fetching completed")
+ p.contents.done = True
+ result.close()
+ return
+ if num_of_rows < 0:
+ p.contents.done = True
+ result.check_error(num_of_rows)
+ result.close()
+ return None
+
+ for row in result.rows_iter(num_of_rows):
+ # print(row)
+ pass
+ p.contents.count += result.row_count
+ result.fetch_rows_a(fetch_callback, p_param)
+
+
+
+def query_callback(p_param, p_result, code):
+ # type: (c_void_p, c_void_p, c_int) -> None
+ if p_result is None:
+ return
+ result = TaosResult(p_result)
+ if code == 0:
+ result.fetch_rows_a(fetch_callback, p_param)
+ result.check_error(code)
+
+
+class Counter(Structure):
+ _fields_ = [("count", c_int), ("done", c_bool)]
+
+ def __str__(self):
+ return "{ count: %d, done: %s }" % (self.count, self.done)
+
+
+def test_query(conn):
+ # type: (TaosConnection) -> None
+ counter = Counter(count=0)
+ conn.query_a("select * from log.log", query_callback, byref(counter))
+
+ while not counter.done:
+ print("wait query callback")
+ time.sleep(1)
+ print(counter)
+ conn.close()
+
+
+if __name__ == "__main__":
+ test_query(taos.connect())
diff --git a/src/connector/python/tests/test_stmt.py b/src/connector/python/tests/test_stmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..938ba10eb3d2377a63f7972deb99dbd47f7de1b2
--- /dev/null
+++ b/src/connector/python/tests/test_stmt.py
@@ -0,0 +1,149 @@
+from taos import *
+
+from ctypes import *
+from datetime import datetime
+import taos
+import pytest
+
+@pytest.fixture
+def conn():
+ # type: () -> taos.TaosConnection
+ return connect()
+
+def test_stmt_insert(conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_stmt"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+
+ conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+ )
+ conn.load_table_info("log")
+
+
+ stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
+ params = new_bind_params(16)
+ params[0].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+ params[1].bool(True)
+ params[2].null()
+ params[3].tinyint(2)
+ params[4].smallint(3)
+ params[5].int(4)
+ params[6].bigint(5)
+ params[7].tinyint_unsigned(6)
+ params[8].smallint_unsigned(7)
+ params[9].int_unsigned(8)
+ params[10].bigint_unsigned(9)
+ params[11].float(10.1)
+ params[12].double(10.11)
+ params[13].binary("hello")
+ params[14].nchar("stmt")
+ params[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
+
+ stmt.bind_param(params)
+ stmt.execute()
+
+ result = stmt.use_result()
+ assert result.affected_rows == 1
+ result.close()
+ stmt.close()
+
+ stmt = conn.statement("select * from log")
+ stmt.execute()
+ result = stmt.use_result()
+ row = result.next()
+ print(row)
+ assert row[2] is None
+ for i in range(3, 11):
+ assert row[i] == i - 1
+ # float equality may not compare exactly as expected
+ # assert row[10] == c_float(10.1)
+ assert row[12] == 10.11
+ assert row[13] == "hello"
+ assert row[14] == "stmt"
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+def test_stmt_insert_multi(conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_stmt_multi"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+
+ conn.execute(
+ "create table if not exists log(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
+ bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
+ ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)",
+ )
+ conn.load_table_info("log")
+
+ start = datetime.now()
+ stmt = conn.statement("insert into log values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+ params = new_multi_binds(16)
+ params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
+ params[1].bool((True, None, False))
+ params[2].tinyint([-128, -128, None]) # -128 is tinyint null
+ params[3].tinyint([0, 127, None])
+ params[4].smallint([3, None, 2])
+ params[5].int([3, 4, None])
+ params[6].bigint([3, 4, None])
+ params[7].tinyint_unsigned([3, 4, None])
+ params[8].smallint_unsigned([3, 4, None])
+ params[9].int_unsigned([3, 4, None])
+ params[10].bigint_unsigned([3, 4, None])
+ params[11].float([3, None, 1])
+ params[12].double([3, None, 1.2])
+ params[13].binary(["abc", "dddafadfadfadfadfa", None])
+ params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
+ params[15].timestamp([None, None, 1626861392591])
+ stmt.bind_param_batch(params)
+
+ stmt.execute()
+ end = datetime.now()
+ print("elapsed time: ", end - start)
+ result = stmt.use_result()
+ assert result.affected_rows == 3
+ result.close()
+ stmt.close()
+
+ stmt = conn.statement("select * from log")
+ stmt.execute()
+ result = stmt.use_result()
+ for row in result:
+ print(row)
+ result.close()
+
+ stmt.close()
+
+ # start = datetime.now()
+ # conn.query("insert into log values(1626861392660, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)(1626861392661, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)(1626861392662, true, NULL, 0, 3,3,3,3,3,3,3,3.0,3.0, 'abc','涛思数据',NULL)")
+
+ # end = datetime.now()
+ # print("elapsed time: ", end - start)
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+if __name__ == "__main__":
+ test_stmt_insert(connect())
+ test_stmt_insert_multi(connect())
\ No newline at end of file
diff --git a/src/connector/python/tests/test_stream.py b/src/connector/python/tests/test_stream.py
new file mode 100644
index 0000000000000000000000000000000000000000..de6e20928b176e51bc6d350fb01268459f4e7f95
--- /dev/null
+++ b/src/connector/python/tests/test_stream.py
@@ -0,0 +1,70 @@
+from taos.cinterface import *
+from taos.precision import *
+from taos.bind import *
+from taos import *
+from ctypes import *
+import time
+import pytest
+
+
+@pytest.fixture
+def conn():
+ return connect()
+
+
+def stream_callback(p_param, p_result, p_row):
+ # type: (c_void_p, c_void_p, c_void_p) -> None
+
+ if p_result is None or p_row is None:
+ return
+ result = TaosResult(p_result)
+ row = TaosRow(result, p_row)
+ try:
+ ts, count = row()
+ p = cast(p_param, POINTER(Counter))
+ p.contents.count += count
+ print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
+
+ except Exception as err:
+ print(err)
+ raise err
+
+
+class Counter(ctypes.Structure):
+ _fields_ = [
+ ("count", c_int),
+ ]
+
+ def __str__(self):
+ return "%d" % self.count
+
+
+def test_stream(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_stream"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, n int)")
+
+ result = conn.query("select count(*) from log interval(5s)")
+ assert result.field_count == 2
+ counter = Counter()
+ counter.count = 0
+ stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
+
+ for _ in range(0, 20):
+ conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
+ time.sleep(2)
+ stream.close()
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_stream(connect())
diff --git a/src/connector/python/tests/test_subscribe.py b/src/connector/python/tests/test_subscribe.py
new file mode 100644
index 0000000000000000000000000000000000000000..99fe5b263625c63200f416ec98fcb561773becd8
--- /dev/null
+++ b/src/connector/python/tests/test_subscribe.py
@@ -0,0 +1,100 @@
+from taos.subscription import TaosSubscription
+from taos import *
+from ctypes import *
+import taos
+import pytest
+import time
+from random import random
+
+
+@pytest.fixture
+def conn():
+ return taos.connect()
+
+
+def test_subscribe(conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "pytest_taos_subscribe_callback"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, n int)")
+ for i in range(10):
+ conn.execute("insert into log values(now, %d)" % i)
+
+ sub = conn.subscribe(True, "test", "select * from log", 1000)
+ print("# consume from begin")
+ for ts, n in sub.consume():
+ print(ts, n)
+
+ print("# consume new data")
+ for i in range(5):
+ conn.execute("insert into log values(now, %d)(now+1s, %d)" % (i, i))
+ result = sub.consume()
+ for ts, n in result:
+ print(ts, n)
+
+ print("# consume with a stop condition")
+ for i in range(10):
+ conn.execute("insert into log values(now, %d)" % int(random() * 10))
+ result = sub.consume()
+ try:
+ ts, n = next(result)
+ print(ts, n)
+ if n > 5:
+ result.stop_query()
+ print("## stopped")
+ break
+ except StopIteration:
+ continue
+
+ sub.close()
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+def subscribe_callback(p_sub, p_result, p_param, errno):
+ # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
+ print("callback")
+ result = TaosResult(p_result)
+ result.check_error(errno)
+ for row in result.rows_iter():
+ ts, n = row()
+ print(ts, n)
+
+
+def test_subscribe_callback(conn):
+ # type: (TaosConnection) -> None
+ dbname = "pytest_taos_subscribe_callback"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s" % dbname)
+ conn.select_db(dbname)
+ conn.execute("create table if not exists log(ts timestamp, n int)")
+
+ print("# subscribe with callback")
+ sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback)
+
+ for i in range(10):
+ conn.execute("insert into log values(now, %d)" % i)
+ time.sleep(0.7)
+ sub.close()
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ raise err
+
+
+if __name__ == "__main__":
+ test_subscribe(taos.connect())
+ test_subscribe_callback(taos.connect())
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index f539e7725315d2358767624ce74a8e9609a0b425..aac5a1c665c1417069b3978d10c7b1406a6b02a4 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -476,21 +476,23 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
- int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
+ int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_MEM_ROW_DATA_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);
SWalHead *pHead = (SWalHead *)buffer;
SSubmitMsg *pMsg = (SSubmitMsg *) (buffer + sizeof(SWalHead));
SSubmitBlk *pBlk = (SSubmitBlk *) (buffer + sizeof(SWalHead) + sizeof(SSubmitMsg));
- SDataRow trow = (SDataRow)pBlk->data;
- tdInitDataRow(trow, pSchema);
+ SMemRow trow = (SMemRow)pBlk->data;
+ SDataRow dataRow = (SDataRow)memRowDataBody(trow);
+ memRowSetType(trow, SMEM_ROW_DATA);
+ tdInitDataRow(dataRow, pSchema);
for (int32_t i = 0; i < pSchema->numOfCols; i++) {
STColumn *c = pSchema->columns + i;
- void* val = row[i];
+ void *val = row[i];
if (val == NULL) {
- val = getNullValue(c->type);
+ val = (void *)getNullValue(c->type);
} else if (c->type == TSDB_DATA_TYPE_BINARY) {
val = ((char*)val) - sizeof(VarDataLenT);
} else if (c->type == TSDB_DATA_TYPE_NCHAR) {
@@ -500,9 +502,9 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
memcpy((char *)val + sizeof(VarDataLenT), buf, len);
varDataLen(val) = len;
}
- tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
+ tdAppendColVal(dataRow, val, c->type, c->offset);
}
- pBlk->dataLen = htonl(dataRowLen(trow));
+ pBlk->dataLen = htonl(memRowDataTLen(trow));
pBlk->schemaLen = 0;
pBlk->uid = htobe64(pObj->uid);
@@ -511,7 +513,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
pBlk->sversion = htonl(pSchema->version);
pBlk->padding = 0;
- pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + dataRowLen(trow);
+ pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + memRowDataTLen(trow);
pMsg->header.vgId = htonl(pContext->vgId);
pMsg->header.contLen = htonl(pHead->len);
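/* Editorial note, not part of the patch: the hunk above migrates the CQ
 * result row from a bare SDataRow to an SMemRow wrapper. A rough mental
 * model, inferred from the calls shown here:
 *
 *   SMemRow: [ row-type header | SDataRow payload ]
 *
 * memRowSetType() stamps SMEM_ROW_DATA, memRowDataBody() exposes the inner
 * SDataRow that tdInitDataRow()/tdAppendColVal() fill, and memRowDataTLen()
 * reports the total length including the wrapper, which is why the size
 * estimate now uses TD_MEM_ROW_DATA_HEAD_SIZE. */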
diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt
index e7ac1be5b1160df447271eeec78e2939923b6d53..47186130ead0d1ee3f4593b7ef346f8cc47f7cba 100644
--- a/src/dnode/CMakeLists.txt
+++ b/src/dnode/CMakeLists.txt
@@ -18,7 +18,7 @@ ELSE ()
ENDIF ()
ADD_EXECUTABLE(taosd ${SRC})
-TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync ${LINK_JEMALLOC})
+TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC})
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd taos_static)
diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c
index a6bb7a48004f74df844d927e689eb56b7297fdd1..4269c77bf33eb9aa5fc0edafde39f9b4772d53d9 100644
--- a/src/dnode/src/dnodeCfg.c
+++ b/src/dnode/src/dnodeCfg.c
@@ -180,7 +180,7 @@ static int32_t dnodeWriteCfg() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c
index 9554651776ad0e9c961a3d15955491b224d869bf..9b1535364744613f92f8f690d4d6fd5893e191cc 100644
--- a/src/dnode/src/dnodeEps.c
+++ b/src/dnode/src/dnodeEps.c
@@ -277,7 +277,7 @@ static int32_t dnodeWriteEps() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c
index 0dca116d84a8267231acba686018cf6c07ae15e0..611c30b84344eac4306b73eb49e1405cc83289b6 100644
--- a/src/dnode/src/dnodeMInfos.c
+++ b/src/dnode/src/dnodeMInfos.c
@@ -286,7 +286,7 @@ static int32_t dnodeWriteMInfos() {
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
diff --git a/src/dnode/src/dnodeMPeer.c b/src/dnode/src/dnodeMPeer.c
index e4942c49aaba48db8d2a1c74a0af532769de9553..8aa28d1618efe871de7795b0d403ee060cf5941c 100644
--- a/src/dnode/src/dnodeMPeer.c
+++ b/src/dnode/src/dnodeMPeer.c
@@ -150,6 +150,8 @@ static void *dnodeProcessMPeerQueue(void *param) {
SMnodeMsg *pPeerMsg;
int32_t type;
void * unUsed;
+
+ setThreadName("dnodeMPeerQ");
while (1) {
if (taosReadQitemFromQset(tsMPeerQset, &type, (void **)&pPeerMsg, &unUsed) == 0) {
diff --git a/src/dnode/src/dnodeMRead.c b/src/dnode/src/dnodeMRead.c
index 90332e6783bc4861928833d5794f0787f80be993..184a6b743afdd5f4284a5acffc8d518356be4ee4 100644
--- a/src/dnode/src/dnodeMRead.c
+++ b/src/dnode/src/dnodeMRead.c
@@ -155,6 +155,8 @@ static void *dnodeProcessMReadQueue(void *param) {
int32_t type;
void * unUsed;
+ setThreadName("dnodeMReadQ");
+
while (1) {
if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pRead, &unUsed) == 0) {
dDebug("qset:%p, mnode read got no message from qset, exiting", tsMReadQset);
diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c
index a409d537fa8a56f03ed79d68358ac70b780e74e9..904ddc21d019343fa3f679db4d25cd9b01e1d97b 100644
--- a/src/dnode/src/dnodeMWrite.c
+++ b/src/dnode/src/dnodeMWrite.c
@@ -168,7 +168,9 @@ static void *dnodeProcessMWriteQueue(void *param) {
SMnodeMsg *pWrite;
int32_t type;
void * unUsed;
-
+
+ setThreadName("dnodeMWriteQ");
+
while (1) {
if (taosReadQitemFromQset(tsMWriteQset, &type, (void **)&pWrite, &unUsed) == 0) {
dDebug("qset:%p, mnode write got no message from qset, exiting", tsMWriteQset);
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index ab2fcbea6ab02604054c51f0b21bc622ea79a323..5291fb73a05da27f98252b680542a83ec41c94a7 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -40,6 +40,10 @@
#include "dnodeShell.h"
#include "dnodeTelemetry.h"
#include "module.h"
+#include "mnode.h"
+#include "qScript.h"
+#include "tcache.h"
+#include "tscompression.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
int32_t moduleStart() { return 0; }
@@ -83,6 +87,29 @@ static SStep tsDnodeSteps[] = {
{"dnode-shell", dnodeInitShell, dnodeCleanupShell},
{"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer},
{"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry},
+ {"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup},
+};
+
+static SStep tsDnodeCompactSteps[] = {
+ {"dnode-tfile", tfInit, tfCleanup},
+ {"dnode-globalcfg", taosCheckGlobalCfg, NULL},
+ {"dnode-storage", dnodeInitStorage, dnodeCleanupStorage},
+ {"dnode-cfg", dnodeInitCfg, dnodeCleanupCfg},
+ {"dnode-eps", dnodeInitEps, dnodeCleanupEps},
+ {"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos},
+ {"dnode-wal", walInit, walCleanUp},
+ {"dnode-sync", syncInit, syncCleanUp},
+ {"dnode-vread", dnodeInitVRead, dnodeCleanupVRead},
+ {"dnode-vwrite", dnodeInitVWrite, dnodeCleanupVWrite},
+ {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt},
+ {"dnode-mread", dnodeInitMRead, NULL},
+ {"dnode-mwrite", dnodeInitMWrite, NULL},
+ {"dnode-mpeer", dnodeInitMPeer, NULL},
+ {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes},
+ {"dnode-modules", dnodeInitModules, dnodeCleanupModules},
+ {"dnode-mread", NULL, dnodeCleanupMRead},
+ {"dnode-mwrite", NULL, dnodeCleanupMWrite},
+ {"dnode-mpeer", NULL, dnodeCleanupMPeer},
};
static int dnodeCreateDir(const char *dir) {
@@ -94,13 +121,23 @@ static int dnodeCreateDir(const char *dir) {
}
static void dnodeCleanupComponents() {
- int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep);
- dnodeStepCleanup(tsDnodeSteps, stepSize);
+ if (!tsCompactMnodeWal) {
+ int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep);
+ dnodeStepCleanup(tsDnodeSteps, stepSize);
+ } else {
+ int32_t stepSize = sizeof(tsDnodeCompactSteps) / sizeof(SStep);
+ dnodeStepCleanup(tsDnodeCompactSteps, stepSize);
+ }
}
static int32_t dnodeInitComponents() {
- int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep);
- return dnodeStepInit(tsDnodeSteps, stepSize);
+ if (!tsCompactMnodeWal) {
+ int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep);
+ return dnodeStepInit(tsDnodeSteps, stepSize);
+ } else {
+ int32_t stepSize = sizeof(tsDnodeCompactSteps) / sizeof(SStep);
+ return dnodeStepInit(tsDnodeCompactSteps, stepSize);
+ }
}
static int32_t dnodeInitTmr() {
@@ -173,6 +210,7 @@ void dnodeCleanUpSystem() {
dnodeCleanupComponents();
taos_cleanup();
taosCloseLog();
+ taosStopCacheRefreshWorker();
}
}
@@ -202,6 +240,12 @@ static void dnodeCheckDataDirOpenned(char *dir) {
}
static int32_t dnodeInitStorage() {
+#ifdef TD_TSZ
+ // compress module init
+ tsCompressInit();
+#endif
+
+ // storage module init
if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno));
return -1;
@@ -217,7 +261,24 @@ static int32_t dnodeInitStorage() {
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
// sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir);
- //TODO(dengyihao): no need to init here
+ if (tsCompactMnodeWal == 1) {
+ sprintf(tsMnodeTmpDir, "%s/mnode_tmp", tsDataDir);
+ if (taosDirExist(tsMnodeTmpDir)) {
+ dError("mnode_tmp dir already exist in %s,quit compact job", tsMnodeTmpDir);
+ return -1;
+ }
+ if (dnodeCreateDir(tsMnodeTmpDir) < 0) {
+ dError("failed to create dir: %s, reason: %s", tsMnodeTmpDir, strerror(errno));
+ return -1;
+ }
+
+ sprintf(tsMnodeBakDir, "%s/mnode_bak", tsDataDir);
+ if (taosDirExist(tsMnodeBakDir)) {
+ dError("mnode_bak dir already exist in %s,quit compact job", tsMnodeBakDir);
+ return -1;
+ }
+ }
+ //TODO(dengyihao): no need to init here
if (dnodeCreateDir(tsMnodeDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
return -1;
@@ -260,7 +321,15 @@ static int32_t dnodeInitStorage() {
return 0;
}
-static void dnodeCleanupStorage() { tfsDestroy(); }
+static void dnodeCleanupStorage() {
+ // storage destroy
+ tfsDestroy();
+
+ #ifdef TD_TSZ
+ // compress destroy
+ tsCompressExit();
+ #endif
+}
bool dnodeIsFirstDeploy() {
return strcmp(tsFirst, tsLocalEp) == 0;
diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c
index b8ce1c802b2475a8aab76a6bb7f0264d2c9b39a0..08269c0bf6141974366936660bee326682cd90f5 100644
--- a/src/dnode/src/dnodePeer.c
+++ b/src/dnode/src/dnodePeer.c
@@ -47,7 +47,8 @@ int32_t dnodeInitServer() {
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToVMgmtQueue;
- dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeDispatchToVMgmtQueue;
+ dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeDispatchToVMgmtQueue;
+ dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = dnodeDispatchToVMgmtQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMPeerQueue;
dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMPeerQueue;
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index 7bc1fd9140e01897e3ae6ad367e40556526daf99..5606681f0f931070e9cbf21d6b98b0d2eb51bdfa 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -48,9 +48,11 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue;
+ dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue;
+ dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE]= dnodeDispatchToMWriteQueue;
@@ -61,6 +63,7 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE]= dnodeDispatchToMWriteQueue;
+ dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_COMPACT_VNODE]= dnodeDispatchToMWriteQueue;
// the following message shall be treated as mnode query
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = dnodeDispatchToMReadQueue;
@@ -71,6 +74,7 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue;
+ dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep;
diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c
index c3e4dae2062707ef4ec7e67ed137875cb45c2142..2f77788025e6d5f36460ceb866b64d54736af6a1 100644
--- a/src/dnode/src/dnodeSystem.c
+++ b/src/dnode/src/dnodeSystem.c
@@ -44,6 +44,8 @@ int32_t main(int32_t argc, char *argv[]) {
dump_config = 1;
} else if (strcmp(argv[i], "--force-keep-file") == 0) {
tsdbForceKeepFile = true;
+ } else if (strcmp(argv[i], "--compact-mnode-wal") == 0) {
+ tsCompactMnodeWal = 1;
} else if (strcmp(argv[i], "-V") == 0) {
#ifdef _ACCT
char *versionStr = "enterprise";
diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c
index 4caece16612353155886ff20055433878ec7411c..22a6dc5b1993b6d15510b078ac4245909221ae78 100644
--- a/src/dnode/src/dnodeTelemetry.c
+++ b/src/dnode/src/dnodeTelemetry.c
@@ -245,6 +245,8 @@ static void* telemetryThread(void* param) {
clock_gettime(CLOCK_REALTIME, &end);
end.tv_sec += 300; // wait 5 minutes before sending the first report
+ setThreadName("telemetry");
+
while (!tsExit) {
int r = 0;
struct timespec ts = end;
diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c
index 41e48411ac29680e8f44ec36f083f8807508f376..c1bfb1460b4b3058434c628f503a4775c4c24701 100644
--- a/src/dnode/src/dnodeVMgmt.c
+++ b/src/dnode/src/dnodeVMgmt.c
@@ -31,6 +31,7 @@ static void * dnodeProcessMgmtQueue(void *param);
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *pMsg);
+static int32_t dnodeProcessCompactVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg);
static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg);
@@ -40,7 +41,8 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg);
int32_t dnodeInitVMgmt() {
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg;
- dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeProcessSyncVnodeMsg;
+ dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeProcessSyncVnodeMsg;
+ dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE]= dnodeProcessCompactVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg;
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg;
@@ -101,6 +103,8 @@ static void *dnodeProcessMgmtQueue(void *wparam) {
int32_t qtype;
void * handle;
+ setThreadName("dnodeMgmtQ");
+
while (1) {
if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) {
dDebug("qdnode mgmt got no message from qset:%p, , exit", pPool->qset);
@@ -154,7 +158,6 @@ static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) {
static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
SCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg);
-
void *pVnode = vnodeAcquire(pCreate->cfg.vgId);
if (pVnode != NULL) {
dDebug("vgId:%d, already exist, return success", pCreate->cfg.vgId);
@@ -188,6 +191,12 @@ static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *rpcMsg) {
return vnodeSync(pSyncVnode->vgId);
}
+static int32_t dnodeProcessCompactVnodeMsg(SRpcMsg *rpcMsg) {
+ SCompactVnodeMsg *pCompactVnode = rpcMsg->pCont;
+ pCompactVnode->vgId = htonl(pCompactVnode->vgId);
+ return vnodeCompact(pCompactVnode->vgId);
+}
+
static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) {
SDropVnodeMsg *pDrop = rpcMsg->pCont;
pDrop->vgId = htonl(pDrop->vgId);
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index 41016d7b99d049922e4de7dc0cbd3dafd2bc4ebf..c404ab1a55c3788f5756c99f7914764e6e9af295 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -119,6 +119,12 @@ static void *dnodeProcessReadQueue(void *wparam) {
int32_t qtype;
void * pVnode;
+ char *threadname = (strcmp(pPool->name, "vquery") == 0) ? "dnodeQueryQ" : "dnodeFetchQ";
+
+ char name[16] = {0};
+ snprintf(name, tListLen(name), "%s", threadname);
+ setThreadName(name);
+
while (1) {
if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) {
dDebug("dnode vquery got no message from qset:%p, exiting", pPool->qset);
diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c
index bbf257ff953779fd9d097ba82e1b42c0b91d1531..ed2a6e210939e907e89dd07de27481d385e4ef24 100644
--- a/src/dnode/src/dnodeVWrite.c
+++ b/src/dnode/src/dnodeVWrite.c
@@ -191,6 +191,8 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
taosBlockSIGPIPE();
dDebug("dnode vwrite worker:%d is running", pWorker->workerId);
+ setThreadName("dnodeWriteQ");
+
while (1) {
numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode);
if (numOfMsgs == 0) {
@@ -202,12 +204,12 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite);
dTrace("msg:%p, app:%p type:%s will be processed in vwrite queue, qtype:%s hver:%" PRIu64, pWrite,
- pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version);
+ pWrite->rpcMsg.ahandle, taosMsg[pWrite->walHead.msgType], qtypeStr[qtype], pWrite->walHead.version);
- pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite);
+ pWrite->code = vnodeProcessWrite(pVnode, &pWrite->walHead, qtype, pWrite);
if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1);
if (pWrite->code > 0) pWrite->code = 0;
- if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
+ if (pWrite->code == 0 && pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true;
dTrace("msg:%p is processed in vwrite queue, code:0x%x", pWrite, pWrite->code);
}
@@ -222,7 +224,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code);
} else {
if (qtype == TAOS_QTYPE_FWD) {
- vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT);
+ vnodeConfirmForward(pVnode, pWrite->walHead.version, pWrite->code, pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT);
}
if (pWrite->rspRet.rsp) {
rpcFreeCont(pWrite->rspRet.rsp);
diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c
index f01a510370758a04fe8972304ae352b796dc6e35..a5b0e9fe30e88f89af2e79af16602dac9500a305 100644
--- a/src/dnode/src/dnodeVnodes.c
+++ b/src/dnode/src/dnodeVnodes.c
@@ -90,6 +90,7 @@ static void *dnodeOpenVnode(void *param) {
char stepDesc[TSDB_STEP_DESC_LEN] = {0};
dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum);
+ setThreadName("dnodeOpenVnode");
for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
int32_t vgId = pThread->vnodeList[v];
diff --git a/src/inc/mnode.h b/src/inc/mnode.h
index 2495a42ba2e5d23cb361e2d64de04d1f710764ea..203ac57469c5bf7e1aa226c63d9bd32ffe068b46 100644
--- a/src/inc/mnode.h
+++ b/src/inc/mnode.h
@@ -73,6 +73,9 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg);
void mnodeProcessPeerRsp(SRpcMsg *pMsg);
int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey);
+int32_t mnodeCompactWal();
+int32_t mnodeCompactComponents();
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/monitor.h b/src/inc/monitor.h
index 1aefb0b84887270c7997c381402096006fe6eaee..d2e5e06487dbdf311cef6da125d7ba3050b53a4d 100644
--- a/src/inc/monitor.h
+++ b/src/inc/monitor.h
@@ -54,7 +54,8 @@ void monCleanupSystem();
void monSaveAcctLog(SAcctMonitorObj *pMonObj);
void monSaveLog(int32_t level, const char *const format, ...);
void monExecuteSQL(char *sql);
-
+typedef void (*MonExecuteSQLCbFP)(void *param, TAOS_RES *, int code);
+void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void* param);
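/* Editorial sketch, not part of this header: a callback matching the
 * MonExecuteSQLCbFP signature declared above. */
static void monDemoCallback(void *param, TAOS_RES *res, int code) {
  (void)param;
  (void)res;
  if (code != 0) {
    /* the monitor module owns res; just record the failure here */
  }
}
/* usage: monExecuteSQLWithResultCallback("create database if not exists log", monDemoCallback, NULL); */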
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/query.h b/src/inc/query.h
index 38078cf21fcededd96dad833bbf1d22de55bb8ac..fb9cbff8584892b4a6bc6e4a6ce046a7500aef39 100644
--- a/src/inc/query.h
+++ b/src/inc/query.h
@@ -28,7 +28,7 @@ typedef void* qinfo_t;
* @param qinfo
* @return
*/
-int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, qinfo_t* qinfo, uint64_t *qId);
+int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, qinfo_t* qinfo, uint64_t qId);
/**
diff --git a/src/inc/taos.h b/src/inc/taos.h
index 3d3797b50b5793978c9cd84d03ba35fba456e8f9..6fa30737e71e8f40cee817386ad4d2c26661777f 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -72,6 +72,7 @@ DLL_EXPORT int taos_init();
DLL_EXPORT void taos_cleanup(void);
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
+DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
const char *taos_data_type(int type);
@@ -82,6 +83,7 @@ typedef struct TAOS_BIND {
uintptr_t buffer_length; // unused
uintptr_t *length;
int * is_null;
+
int is_unsigned; // unused
int * error; // unused
union {
@@ -99,16 +101,32 @@ typedef struct TAOS_BIND {
unsigned int allocated;
} TAOS_BIND;
+typedef struct TAOS_MULTI_BIND {
+ int buffer_type;
+ void *buffer;
+ uintptr_t buffer_length;
+ int32_t *length;
+ char *is_null;
+ int num;
+} TAOS_MULTI_BIND;
+
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
+DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags);
+DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name);
+DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name);
+
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
-int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
+DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind);
+DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind);
+DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx);
DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt);
+DLL_EXPORT char * taos_stmt_errstr(TAOS_STMT *stmt);
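/* Editorial sketch, not part of this header: binding three INT values to one
 * parameter column with the new batch API. Field semantics are inferred from
 * the TAOS_MULTI_BIND struct above. */
static int demoBindBatch(TAOS_STMT *stmt) {
  int32_t vals[3]  = {1, 2, 3};
  int32_t lens[3]  = {sizeof(int32_t), sizeof(int32_t), sizeof(int32_t)};
  char    nulls[3] = {0, 0, 0};        /* no NULL rows in this batch */
  TAOS_MULTI_BIND b = {0};
  b.buffer_type   = TSDB_DATA_TYPE_INT;
  b.buffer        = vals;
  b.buffer_length = sizeof(int32_t);   /* stride of one element */
  b.length        = lens;
  b.is_null       = nulls;
  b.num           = 3;
  return taos_stmt_bind_param_batch(stmt, &b);
}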
DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
@@ -122,11 +140,10 @@ DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
-
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
-int taos_validate_sql(TAOS *taos, const char *sql);
+DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
-int* taos_fetch_lengths(TAOS_RES *res);
+DLL_EXPORT int* taos_fetch_lengths(TAOS_RES *res);
// TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild);
// TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild);
@@ -153,6 +170,8 @@ DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr);
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList);
+DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines);
+
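/* Editorial sketch, not part of this header: taos_insert_lines() takes one
 * line-protocol record per array element. The InfluxDB-style format below is
 * an assumption for illustration, not defined by this header. */
static int demoInsertLines(TAOS *taos) {
  char *lines[] = {
    "st,t1=3i64 c1=2i64,c2=false,c3=\"text\" 1626006833639000000",
  };
  return taos_insert_lines(taos, lines, 1);
}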
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 672d460f2ce571d23d9bc5fd6fd8a104f39eb636..bc4ddbe067b0997695ef22bbcc21228df9e92199 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -22,7 +22,6 @@ extern "C" {
#include <stdint.h>
#include <stdbool.h>
-#include "osDef.h"
#include "taos.h"
#define TSDB__packed
@@ -34,6 +33,8 @@ extern "C" {
#endif
#define TSWINDOW_INITIALIZER ((STimeWindow) {INT64_MIN, INT64_MAX})
+#define TSWINDOW_DESC_INITIALIZER ((STimeWindow) {INT64_MAX, INT64_MIN})
+
#define TSKEY_INITIAL_VAL INT64_MIN
// Bytes for each type.
@@ -80,10 +81,14 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
+#elif (_TD_TQ_ == true)
+#define TSDB_DEFAULT_PASS "tqueue"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif
+#define SHELL_MAX_PASSWORD_LEN 20
+
#define TSDB_TRUE 1
#define TSDB_FALSE 0
#define TSDB_OK 0
@@ -99,7 +104,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_TIME_PRECISION_MICRO_STR "us"
#define TSDB_TIME_PRECISION_NANO_STR "ns"
-#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
+#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)))
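/* Editorial note on the cast added above: 1e3L/1e6L/1e9L are long double
 * constants, so the old macro silently promoted tick arithmetic to floating
 * point (and long double is only a 53-bit double on some compilers). With
 * the (int64_t) cast, conversions such as this sketch stay in exact integer
 * math: */
static int64_t demoSecondsToTicks(int64_t seconds, int32_t precision) {
  return seconds * TSDB_TICK_PER_SECOND(precision);
}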
#define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#define T_APPEND_MEMBER(dst, ptr, type, member) \
@@ -165,7 +170,7 @@ do { \
#define TSDB_BINARY_OP_REMAINDER 34
-#define IS_RELATION_OPTR(op) (((op) >= TSDB_RELATION_LESS) && ((op) <= TSDB_RELATION_IN))
+#define IS_RELATION_OPTR(op) (((op) >= TSDB_RELATION_LESS) && ((op) < TSDB_RELATION_IN))
#define IS_ARITHMETIC_OPTR(op) (((op) >= TSDB_BINARY_OP_ADD) && ((op) <= TSDB_BINARY_OP_REMAINDER))
#define TS_PATH_DELIMITER_LEN 1
@@ -177,12 +182,16 @@ do { \
// this is the length of its string representation, including the terminator zero
#define TSDB_ACCT_ID_LEN 11
-#define TSDB_MAX_COLUMNS 1024
+#define TSDB_MAX_COLUMNS 4096
#define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns
#define TSDB_NODE_NAME_LEN 64
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 33
+#define TSDB_FUNC_NAME_LEN 65
+#define TSDB_FUNC_CODE_LEN (65535 - 512)
+#define TSDB_FUNC_BUF_SIZE 512
+#define TSDB_TYPE_STR_MAX_LEN 32
#define TSDB_TABLE_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN)
#define TSDB_COL_NAME_LEN 65
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
@@ -192,7 +201,13 @@ do { \
#define TSDB_APPNAME_LEN TSDB_UNI_LEN
-#define TSDB_MAX_BYTES_PER_ROW 16384
+ /**
+ * In some scenarios a uint16_t (0~65535) is used to store the row length.
+ * - First, the SDataRow/SKVRow header takes 4 bytes, leaving 65535 - 4 = 65531.
+ * - Second, if every column except the primary key is a VarDataT type, each of
+ * the remaining 4095 columns needs a 4-byte offset, so the final value is
+ * 65531 - (4096 - 1) * 4 = 49151.
+ */
+#define TSDB_MAX_BYTES_PER_ROW 49151
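/* Editorial check of the arithmetic above (C11 static_assert, assuming
 * <assert.h>; 4096 is TSDB_MAX_COLUMNS): */
static_assert(65535 - 4 == 65531, "budget after the 4-byte row header");
static_assert(65531 - (4096 - 1) * 4 == 49151, "budget after 4095 var-column offsets");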
#define TSDB_MAX_TAGS_LEN 16384
#define TSDB_MAX_TAGS 128
#define TSDB_MAX_TAG_CONDITIONS 1024
@@ -243,8 +258,8 @@ do { \
#define TSDB_MAX_REPLICA 5
#define TSDB_TBNAME_COLUMN_INDEX (-1)
-#define TSDB_BLOCK_DIST_COLUMN_INDEX (-2)
-#define TSDB_UD_COLUMN_INDEX (-100)
+#define TSDB_UD_COLUMN_INDEX (-1000)
+#define TSDB_RES_COL_ID (-5000)
#define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta
@@ -294,11 +309,11 @@ do { \
#define TSDB_DEFAULT_WAL_LEVEL 1
#define TSDB_MIN_DB_UPDATE 0
-#define TSDB_MAX_DB_UPDATE 1
+#define TSDB_MAX_DB_UPDATE 2
#define TSDB_DEFAULT_DB_UPDATE_OPTION 0
#define TSDB_MIN_DB_CACHE_LAST_ROW 0
-#define TSDB_MAX_DB_CACHE_LAST_ROW 1
+#define TSDB_MAX_DB_CACHE_LAST_ROW 3
#define TSDB_DEFAULT_CACHE_LAST_ROW 0
#define TSDB_MIN_FSYNC_PERIOD 0
@@ -320,8 +335,9 @@ do { \
#define TSDB_MAX_JOIN_TABLE_NUM 10
#define TSDB_MAX_UNION_CLAUSE 5
-#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)
-#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)
+#define TSDB_MAX_FIELD_LEN 16384
+#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384
+#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384
#define PRIMARYKEY_TIMESTAMP_COL_INDEX 0
#define TSDB_MAX_RPC_THREADS 5
@@ -329,6 +345,10 @@ do { \
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode
+#define TSDB_UDF_TYPE_SCALAR 1
+#define TSDB_UDF_TYPE_AGGREGATE 2
+
+
/*
* 1. ordinary sub query for select * from super_table
* 2. all sqlobj generated by createSubqueryObj with this flag
@@ -345,7 +365,9 @@ do { \
#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u
#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
+#define TSDB_QUERY_TYPE_FILE_INSERT 0x400u // insert data from file
#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type
+#define TSDB_QUERY_TYPE_NEST_SUBQUERY 0x1000u // nested sub query
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
@@ -390,9 +412,10 @@ typedef enum {
typedef enum {
TSDB_SUPER_TABLE = 0, // super table
TSDB_CHILD_TABLE = 1, // table created from super table
- TSDB_NORMAL_TABLE = 2, // ordinary table
- TSDB_STREAM_TABLE = 3, // table created from stream computing
- TSDB_TABLE_MAX = 4
+ TSDB_NORMAL_TABLE = 2, // ordinary table
+ TSDB_STREAM_TABLE = 3, // table created from stream computing
+ TSDB_TEMP_TABLE = 4, // temp table created by nest query
+ TSDB_TABLE_MAX = 5
} ETableType;
typedef enum {
@@ -415,6 +438,12 @@ typedef enum {
TSDB_CHECK_ITEM_MAX
} ECheckItemType;
+typedef enum {
+ TD_ROW_DISCARD_UPDATE = 0,
+ TD_ROW_OVERWRITE_UPDATE = 1,
+ TD_ROW_PARTIAL_UPDATE = 2
+} TDUpdateConfig;
+
extern char *qtypeStr[];
#ifdef __cplusplus
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 619869efa58cda04a2e12b415a8e07749f985729..000703464cfb8c687e473b7559e1048c42f8a6de 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -74,7 +74,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_REF_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x010A) //"Ref is not there")
//client
-#define TSDB_CODE_TSC_INVALID_SQL TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid SQL statement")
+#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) //"Invalid Operation")
#define TSDB_CODE_TSC_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0201) //"Invalid qhandle")
#define TSDB_CODE_TSC_INVALID_TIME_STAMP TAOS_DEF_ERROR_CODE(0, 0x0202) //"Invalid combination of client/service time")
#define TSDB_CODE_TSC_INVALID_VALUE TAOS_DEF_ERROR_CODE(0, 0x0203) //"Invalid value in client")
@@ -100,6 +100,12 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217) //"Database not specified or available")
#define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218) //"Table does not exist")
#define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config")
+#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty")
+#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line")
+#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached")
+#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"Duplicated column names")
+#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length")
+#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length")
// mnode
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
@@ -173,6 +179,17 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist")
#define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist")
#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message")
+#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes")
+
+#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name")
+#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length")
+#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code")
+#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists")
+#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func")
+#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize")
+
+#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"Invalid tag length")
+#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"Invalid column length")
#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available")
#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists")
@@ -245,6 +262,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk")
#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message")
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
+#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data")
// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")
@@ -260,6 +278,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query")
#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached")
#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica")
+#define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error")
// grant
@@ -394,6 +413,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find")
#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string")
+#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error")
+
// odbc
#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory")
#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input")
@@ -429,6 +450,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level")
#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk")
+// monitor
+#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection")
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index 2d782108440971d31af16ffeb1a6d5f2a2dc205c..d3f441e72f3e1ac5675bdce958cdf5d4cf58171c 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -61,9 +61,11 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CONFIG_DNODE, "config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_VNODE, "alter-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_SYNC_VNODE, "sync-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_MNODE, "create-mnode" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_COMPACT_VNODE, "compact-vnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY6, "dummy6" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY7, "dummy7" )
+
// message from client to mnode
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CONNECT, "connect" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_ACCT, "create-acct" )
@@ -75,8 +77,10 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_USER, "drop-user" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DNODE, "create-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DNODE, "drop-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" )
-TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
-TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_FUNCTION, "create-function" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_FUNCTION, "drop-function" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" )
@@ -84,7 +88,8 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_TABLE_META, "table-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_STABLE_VGROUP, "stable-vgroup" )
-TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_TABLES_META, "tables-meta" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_COMPACT_VNODE, "compact-vnode" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_TABLES_META, "multiTable-meta" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_STREAM, "alter-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SHOW, "show" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_RETRIEVE, "retrieve" )
@@ -93,7 +98,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_STREAM, "kill-stream" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_CONN, "kill-conn" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CONFIG_DNODE, "cm-config-dnode" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_HEARTBEAT, "heartbeat" )
-TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY8, "dummy8" )
+TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_RETRIEVE_FUNC, "retrieve-func" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY9, "dummy9" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY10, "dummy10" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY11, "dummy11" )
@@ -150,6 +155,7 @@ enum _mgmt_table {
TSDB_MGMT_TABLE_STREAMTABLES,
TSDB_MGMT_TABLE_CLUSTER,
TSDB_MGMT_TABLE_TP,
+ TSDB_MGMT_TABLE_FUNCTION,
TSDB_MGMT_TABLE_MAX,
};
@@ -161,6 +167,7 @@ enum _mgmt_table {
#define TSDB_ALTER_TABLE_ADD_COLUMN 5
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
+#define TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN 8
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
@@ -294,6 +301,8 @@ typedef struct {
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
+ // set when the user specifies DROP STABLE; an error is returned if the target is not a super table
+ int8_t supertable;
int8_t igNotExists;
} SCMDropTableMsg;
@@ -390,16 +399,15 @@ typedef struct {
typedef struct {
int32_t vgId;
-} SDropVnodeMsg, SSyncVnodeMsg;
+} SDropVnodeMsg, SSyncVnodeMsg, SCompactVnodeMsg;
typedef struct SColIndex {
int16_t colId; // column id
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
uint16_t flag; // denote if it is a tag or a normal column
- char name[TSDB_COL_NAME_LEN]; // TODO remove it
+ char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1];
} SColIndex;
-
typedef struct SColumnFilterInfo {
int16_t lowerRelOptr;
int16_t upperRelOptr;
@@ -421,42 +429,13 @@ typedef struct SColumnFilterInfo {
};
} SColumnFilterInfo;
-/* sql function msg, to describe the message to vnode about sql function
- * operations in select clause */
-typedef struct SSqlFuncMsg {
- int16_t functionId;
- int16_t numOfParams;
-
- int16_t resColId; // result column id, id of the current output column
- int16_t colType;
- int16_t colBytes;
-
- SColIndex colInfo;
- struct ArgElem {
- int16_t argType;
- int16_t argBytes;
- union {
- double d;
- int64_t i64;
- char * pz;
- } argValue;
- } arg[3];
-
- int32_t filterNum;
- SColumnFilterInfo filterInfo[];
-} SSqlFuncMsg;
-
-
-typedef struct SExprInfo {
- SColumnFilterInfo * pFilter;
- struct tExprNode* pExpr;
- int16_t bytes;
- int16_t type;
- int32_t interBytes;
- int64_t uid;
- SSqlFuncMsg base;
-} SExprInfo;
-
+typedef struct SColumnFilterList {
+ int16_t numOfFilters;
+ union{
+ int64_t placeholder;
+ SColumnFilterInfo *filterInfo;
+ };
+} SColumnFilterList;
/*
* for client side struct, we only need the column id, type, bytes are not necessary
* But for data in vnode side, we need all the following information.
@@ -465,11 +444,7 @@ typedef struct SColumnInfo {
int16_t colId;
int16_t type;
int16_t bytes;
- int16_t numOfFilters;
- union{
- int64_t placeholder;
- SColumnFilterInfo *filters;
- };
+ SColumnFilterList flist;
} SColumnInfo;
typedef struct STableIdInfo {
@@ -483,10 +458,31 @@ typedef struct STimeWindow {
TSKEY ekey;
} STimeWindow;
+typedef struct {
+ int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
+ int32_t tsLen; // total length of ts comp block
+ int32_t tsNumOfBlocks; // ts comp block numbers
+ int32_t tsOrder; // ts comp block order
+} STsBufInfo;
+
typedef struct {
SMsgHead head;
char version[TSDB_VERSION_LEN];
+ bool stableQuery; // super table query or not
+ bool topBotQuery; // TODO: fold these flags into a single bitmask
+ bool interpQuery; // interp query or not
+ bool groupbyColumn; // denote if this is a groupby normal column query
+ bool hasTagResults; // if there are tag values in final result or not
+ bool timeWindowInterpo; // if the time window start/end requires interpolation
+ bool queryBlockDist; // if the query asks for data block distribution
+ bool stabledev; // super table stddev query
+ bool tsCompQuery; // is tscomp query
+ bool simpleAgg;
+ bool pointInterpQuery; // point interpolation query
+ bool needReverseScan; // need reverse scan
+ bool stateWindow; // state window flag
+
STimeWindow window;
int32_t numOfTables;
int16_t order;
@@ -509,14 +505,16 @@ typedef struct {
int16_t fillType; // interpolate type
uint64_t fillVal; // default value array list
int32_t secondStageOutput;
- int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
- int32_t tsLen; // total length of ts comp block
- int32_t tsNumOfBlocks; // ts comp block numbers
- int32_t tsOrder; // ts comp block order
+ STsBufInfo tsBuf; // tsBuf info
int32_t numOfTags; // number of tags columns involved
int32_t sqlstrLen; // sql query string
int32_t prevResultLen; // previous result length
- SColumnInfo colList[];
+ int32_t numOfOperator;
+ int32_t tableScanOperator; // table scan operator; -1 means no scan operator
+ int32_t udfNum; // number of udf function
+ int32_t udfContentOffset;
+ int32_t udfContentLen;
+ SColumnInfo tableCols[];
} SQueryTableMsg;
typedef struct {
@@ -551,7 +549,7 @@ typedef struct {
uint8_t status;
uint8_t role;
uint8_t replica;
- uint8_t reserved;
+ uint8_t compact;
} SVnodeLoad;
typedef struct {
@@ -560,7 +558,7 @@ typedef struct {
int32_t totalBlocks;
int32_t maxTables;
int32_t daysPerFile;
- int32_t daysToKeep;
+ int32_t daysToKeep0;
int32_t daysToKeep1;
int32_t daysToKeep2;
int32_t minRowsPerFileBlock;
@@ -580,6 +578,41 @@ typedef struct {
int8_t reserve[5];
} SCreateDbMsg, SAlterDbMsg;
+typedef struct {
+ char name[TSDB_FUNC_NAME_LEN];
+ char path[PATH_MAX];
+ int32_t funcType;
+ uint8_t outputType;
+ int16_t outputLen;
+ int32_t bufSize;
+ int32_t codeLen;
+ char code[];
+} SCreateFuncMsg;
+
+typedef struct {
+ int32_t num;
+ char name[];
+} SRetrieveFuncMsg;
+
+typedef struct {
+ char name[TSDB_FUNC_NAME_LEN];
+ int32_t funcType;
+ int8_t resType;
+ int16_t resBytes;
+ int32_t bufSize;
+ int32_t len;
+ char content[];
+} SFunctionInfoMsg;
+
+typedef struct {
+ int32_t num;
+ char content[];
+} SUdfFuncMsg;
+
+typedef struct {
+ char name[TSDB_FUNC_NAME_LEN];
+} SDropFuncMsg;
+
typedef struct {
char db[TSDB_TABLE_FNAME_LEN];
uint8_t ignoreNotExists;
@@ -719,8 +752,11 @@ typedef struct {
} STableInfoMsg;
typedef struct {
+ uint8_t metaClone; // create local clone of the cached table meta
+ int32_t numOfVgroups;
int32_t numOfTables;
- char tableIds[];
+ int32_t numOfUdfs;
+ char tableNames[];
} SMultiTableInfoMsg;
typedef struct SSTableVgroupMsg {
@@ -769,8 +805,13 @@ typedef struct STableMetaMsg {
typedef struct SMultiTableMeta {
int32_t numOfTables;
+ int32_t numOfVgroup;
+ int32_t numOfUdf;
int32_t contLen;
- char metas[];
+ uint8_t compressed; // denote if the payload is compressed or not
+ uint32_t rawLen; // size before compression
+ uint8_t metaClone; // make a local clone after retrieving the meta from the mnode
+ char meta[];
} SMultiTableMeta;
typedef struct {
@@ -791,6 +832,12 @@ typedef struct {
char payload[];
} SShowMsg;
+typedef struct {
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
+ int32_t numOfVgroup;
+ int32_t vgid[];
+} SCompactMsg;
+
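/* Editorial sketch: sizing a flexible-array message such as SCompactMsg
 * (assumes <stdlib.h>). vgid[] contributes no size of its own, so the
 * payload is the fixed header plus one int32_t per vgroup id. */
static SCompactMsg *demoBuildCompactMsg(int32_t n) {
  SCompactMsg *pMsg = calloc(1, sizeof(SCompactMsg) + n * sizeof(int32_t));
  if (pMsg != NULL) pMsg->numOfVgroup = htonl(n); /* wire fields use network order */
  return pMsg;
}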
typedef struct SShowRsp {
uint64_t qhandle;
STableMetaMsg tableMeta;
@@ -827,7 +874,11 @@ typedef struct {
uint32_t queryId;
int64_t useconds;
int64_t stime;
- uint64_t qHandle;
+ uint64_t qId;
+ uint64_t sqlObjId;
+ int32_t pid;
+ char fqdn[TSDB_FQDN_LEN];
+ int32_t numOfSub;
} SQueryDesc;
typedef struct {
diff --git a/src/inc/tfs.h b/src/inc/tfs.h
index 4ed21bc6e1ddb4d9011d701ad99e6f2a71f54b0e..e72620eca6965d78609bbbd283fff8085d08a4b8 100644
--- a/src/inc/tfs.h
+++ b/src/inc/tfs.h
@@ -31,6 +31,8 @@ typedef struct {
#define TFS_UNDECIDED_ID -1
#define TFS_PRIMARY_LEVEL 0
#define TFS_PRIMARY_ID 0
+#define TFS_MIN_LEVEL 0
+#define TFS_MAX_LEVEL (TSDB_MAX_TIERS - 1)
// FS APIs ====================================
typedef struct {
diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h
index da33a2ba737183a10234ea75f0c8dd177e0cdea3..7abe3e99c720af1682fc103beec9a5d4caeb09eb 100644
--- a/src/inc/tsdb.h
+++ b/src/inc/tsdb.h
@@ -69,9 +69,13 @@ typedef struct {
int8_t precision;
int8_t compression;
int8_t update;
- int8_t cacheLastRow;
+ int8_t cacheLastRow; // 0: no cache, 1: cache last row, 2: cache last non-NULL column values, 3: both 1 and 2
} STsdbCfg;
+#define CACHE_NO_LAST(c) ((c)->cacheLastRow == 0)
+#define CACHE_LAST_ROW(c) (((c)->cacheLastRow & 1) > 0)
+#define CACHE_LAST_NULL_COLUMN(c) (((c)->cacheLastRow & 2) > 0)
+
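/* Editorial sketch (assumes <stdio.h>): decoding the cacheLastRow bit flags
 * defined above. */
static void demoPrintCacheMode(const STsdbCfg *pCfg) {
  if (CACHE_NO_LAST(pCfg))          printf("no last-value caching\n");
  if (CACHE_LAST_ROW(pCfg))         printf("last row cached\n");           /* modes 1 and 3 */
  if (CACHE_LAST_NULL_COLUMN(pCfg)) printf("last column values cached\n"); /* modes 2 and 3 */
}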
// --------- TSDB REPOSITORY USAGE STATISTICS
typedef struct {
int64_t totalStorage; // total bytes occupied
@@ -90,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH);
int tsdbCloseRepo(STsdbRepo *repo, int toCommit);
int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg);
int tsdbGetState(STsdbRepo *repo);
-
+int8_t tsdbGetCompactState(STsdbRepo *repo);
// --------- TSDB TABLE DEFINITION
typedef struct {
uint64_t uid; // the unique table ID
@@ -107,7 +111,7 @@ typedef struct {
uint64_t superUid;
STSchema * schema;
STSchema * tagSchema;
- SDataRow tagValues;
+ SKVRow tagValues;
char * sql;
} STableCfg;
@@ -221,7 +225,7 @@ typedef struct {
typedef struct {
uint32_t numOfTables;
- SArray * pGroupList;
+ SArray *pGroupList;
SHashObj *map; // speedup acquire the tableQueryInfo by table uid
} STableGroupInfo;
@@ -266,6 +270,12 @@ TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, uint64_t qId,
SMemRef *pRef);
+
+TsdbQueryHandleT tsdbQueryCacheLast(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef);
+
+bool isTsdbCacheLastRow(TsdbQueryHandleT* pQueryHandle);
+
+
/**
* get the queried table object list
* @param pHandle
@@ -400,6 +410,9 @@ void tsdbDecCommitRef(int vgId);
int tsdbSyncSend(void *pRepo, SOCKET socketFd);
int tsdbSyncRecv(void *pRepo, SOCKET socketFd);
+// For TSDB Compact
+int tsdbCompact(STsdbRepo *pRepo);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index e411ac62320c95262d179c903ea0041731adac3b..0a84d7f13b735e0589b4df5e17122ed2b0fe69b2 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -16,7 +16,6 @@
#ifndef TDENGINE_TTOKENDEF_H
#define TDENGINE_TTOKENDEF_H
-
#define TK_ID 1
#define TK_BOOL 2
#define TK_TINYINT 3
@@ -63,155 +62,155 @@
#define TK_SHOW 44
#define TK_DATABASES 45
#define TK_TOPICS 46
-#define TK_MNODES 47
-#define TK_DNODES 48
-#define TK_ACCOUNTS 49
-#define TK_USERS 50
-#define TK_MODULES 51
-#define TK_QUERIES 52
-#define TK_CONNECTIONS 53
-#define TK_STREAMS 54
-#define TK_VARIABLES 55
-#define TK_SCORES 56
-#define TK_GRANTS 57
-#define TK_VNODES 58
-#define TK_IPTOKEN 59
-#define TK_DOT 60
-#define TK_CREATE 61
-#define TK_TABLE 62
-#define TK_DATABASE 63
-#define TK_TABLES 64
-#define TK_STABLES 65
-#define TK_VGROUPS 66
-#define TK_DROP 67
-#define TK_STABLE 68
-#define TK_TOPIC 69
-#define TK_DNODE 70
-#define TK_USER 71
-#define TK_ACCOUNT 72
-#define TK_USE 73
-#define TK_DESCRIBE 74
-#define TK_ALTER 75
-#define TK_PASS 76
-#define TK_PRIVILEGE 77
-#define TK_LOCAL 78
-#define TK_IF 79
-#define TK_EXISTS 80
-#define TK_PPS 81
-#define TK_TSERIES 82
-#define TK_DBS 83
-#define TK_STORAGE 84
-#define TK_QTIME 85
-#define TK_CONNS 86
-#define TK_STATE 87
-#define TK_KEEP 88
-#define TK_CACHE 89
-#define TK_REPLICA 90
-#define TK_QUORUM 91
-#define TK_DAYS 92
-#define TK_MINROWS 93
-#define TK_MAXROWS 94
-#define TK_BLOCKS 95
-#define TK_CTIME 96
-#define TK_WAL 97
-#define TK_FSYNC 98
-#define TK_COMP 99
-#define TK_PRECISION 100
-#define TK_UPDATE 101
-#define TK_CACHELAST 102
-#define TK_PARTITIONS 103
-#define TK_LP 104
-#define TK_RP 105
-#define TK_UNSIGNED 106
-#define TK_TAGS 107
-#define TK_USING 108
-#define TK_COMMA 109
-#define TK_AS 110
-#define TK_NULL 111
-#define TK_SELECT 112
-#define TK_UNION 113
-#define TK_ALL 114
-#define TK_DISTINCT 115
-#define TK_FROM 116
-#define TK_VARIABLE 117
-#define TK_INTERVAL 118
-#define TK_SESSION 119
-#define TK_FILL 120
-#define TK_SLIDING 121
-#define TK_ORDER 122
-#define TK_BY 123
-#define TK_ASC 124
-#define TK_DESC 125
-#define TK_GROUP 126
-#define TK_HAVING 127
-#define TK_LIMIT 128
-#define TK_OFFSET 129
-#define TK_SLIMIT 130
-#define TK_SOFFSET 131
-#define TK_WHERE 132
-#define TK_NOW 133
-#define TK_RESET 134
-#define TK_QUERY 135
-#define TK_SYNCDB 136
-#define TK_ADD 137
-#define TK_COLUMN 138
-#define TK_TAG 139
-#define TK_CHANGE 140
-#define TK_SET 141
-#define TK_KILL 142
-#define TK_CONNECTION 143
-#define TK_STREAM 144
-#define TK_COLON 145
-#define TK_ABORT 146
-#define TK_AFTER 147
-#define TK_ATTACH 148
-#define TK_BEFORE 149
-#define TK_BEGIN 150
-#define TK_CASCADE 151
-#define TK_CLUSTER 152
-#define TK_CONFLICT 153
-#define TK_COPY 154
-#define TK_DEFERRED 155
-#define TK_DELIMITERS 156
-#define TK_DETACH 157
-#define TK_EACH 158
-#define TK_END 159
-#define TK_EXPLAIN 160
-#define TK_FAIL 161
-#define TK_FOR 162
-#define TK_IGNORE 163
-#define TK_IMMEDIATE 164
-#define TK_INITIALLY 165
-#define TK_INSTEAD 166
-#define TK_MATCH 167
-#define TK_KEY 168
-#define TK_OF 169
-#define TK_RAISE 170
-#define TK_REPLACE 171
-#define TK_RESTRICT 172
-#define TK_ROW 173
-#define TK_STATEMENT 174
-#define TK_TRIGGER 175
-#define TK_VIEW 176
-#define TK_SEMI 177
-#define TK_NONE 178
-#define TK_PREV 179
-#define TK_LINEAR 180
-#define TK_IMPORT 181
-#define TK_TBNAME 182
-#define TK_JOIN 183
-#define TK_INSERT 184
-#define TK_INTO 185
-#define TK_VALUES 186
-
-
-
-
-
-
-
-
-
+#define TK_FUNCTIONS 47
+#define TK_MNODES 48
+#define TK_DNODES 49
+#define TK_ACCOUNTS 50
+#define TK_USERS 51
+#define TK_MODULES 52
+#define TK_QUERIES 53
+#define TK_CONNECTIONS 54
+#define TK_STREAMS 55
+#define TK_VARIABLES 56
+#define TK_SCORES 57
+#define TK_GRANTS 58
+#define TK_VNODES 59
+#define TK_IPTOKEN 60
+#define TK_DOT 61
+#define TK_CREATE 62
+#define TK_TABLE 63
+#define TK_STABLE 64
+#define TK_DATABASE 65
+#define TK_TABLES 66
+#define TK_STABLES 67
+#define TK_VGROUPS 68
+#define TK_DROP 69
+#define TK_TOPIC 70
+#define TK_FUNCTION 71
+#define TK_DNODE 72
+#define TK_USER 73
+#define TK_ACCOUNT 74
+#define TK_USE 75
+#define TK_DESCRIBE 76
+#define TK_DESC 77
+#define TK_ALTER 78
+#define TK_PASS 79
+#define TK_PRIVILEGE 80
+#define TK_LOCAL 81
+#define TK_COMPACT 82
+#define TK_LP 83
+#define TK_RP 84
+#define TK_IF 85
+#define TK_EXISTS 86
+#define TK_AS 87
+#define TK_OUTPUTTYPE 88
+#define TK_AGGREGATE 89
+#define TK_BUFSIZE 90
+#define TK_PPS 91
+#define TK_TSERIES 92
+#define TK_DBS 93
+#define TK_STORAGE 94
+#define TK_QTIME 95
+#define TK_CONNS 96
+#define TK_STATE 97
+#define TK_COMMA 98
+#define TK_KEEP 99
+#define TK_CACHE 100
+#define TK_REPLICA 101
+#define TK_QUORUM 102
+#define TK_DAYS 103
+#define TK_MINROWS 104
+#define TK_MAXROWS 105
+#define TK_BLOCKS 106
+#define TK_CTIME 107
+#define TK_WAL 108
+#define TK_FSYNC 109
+#define TK_COMP 110
+#define TK_PRECISION 111
+#define TK_UPDATE 112
+#define TK_CACHELAST 113
+#define TK_PARTITIONS 114
+#define TK_UNSIGNED 115
+#define TK_TAGS 116
+#define TK_USING 117
+#define TK_NULL 118
+#define TK_NOW 119
+#define TK_SELECT 120
+#define TK_UNION 121
+#define TK_ALL 122
+#define TK_DISTINCT 123
+#define TK_FROM 124
+#define TK_VARIABLE 125
+#define TK_INTERVAL 126
+#define TK_EVERY 127
+#define TK_SESSION 128
+#define TK_STATE_WINDOW 129
+#define TK_FILL 130
+#define TK_SLIDING 131
+#define TK_ORDER 132
+#define TK_BY 133
+#define TK_ASC 134
+#define TK_GROUP 135
+#define TK_HAVING 136
+#define TK_LIMIT 137
+#define TK_OFFSET 138
+#define TK_SLIMIT 139
+#define TK_SOFFSET 140
+#define TK_WHERE 141
+#define TK_RESET 142
+#define TK_QUERY 143
+#define TK_SYNCDB 144
+#define TK_ADD 145
+#define TK_COLUMN 146
+#define TK_MODIFY 147
+#define TK_TAG 148
+#define TK_CHANGE 149
+#define TK_SET 150
+#define TK_KILL 151
+#define TK_CONNECTION 152
+#define TK_STREAM 153
+#define TK_COLON 154
+#define TK_ABORT 155
+#define TK_AFTER 156
+#define TK_ATTACH 157
+#define TK_BEFORE 158
+#define TK_BEGIN 159
+#define TK_CASCADE 160
+#define TK_CLUSTER 161
+#define TK_CONFLICT 162
+#define TK_COPY 163
+#define TK_DEFERRED 164
+#define TK_DELIMITERS 165
+#define TK_DETACH 166
+#define TK_EACH 167
+#define TK_END 168
+#define TK_EXPLAIN 169
+#define TK_FAIL 170
+#define TK_FOR 171
+#define TK_IGNORE 172
+#define TK_IMMEDIATE 173
+#define TK_INITIALLY 174
+#define TK_INSTEAD 175
+#define TK_MATCH 176
+#define TK_KEY 177
+#define TK_OF 178
+#define TK_RAISE 179
+#define TK_REPLACE 180
+#define TK_RESTRICT 181
+#define TK_ROW 182
+#define TK_STATEMENT 183
+#define TK_TRIGGER 184
+#define TK_VIEW 185
+#define TK_SEMI 186
+#define TK_NONE 187
+#define TK_PREV 188
+#define TK_LINEAR 189
+#define TK_IMPORT 190
+#define TK_TBNAME 191
+#define TK_JOIN 192
+#define TK_INSERT 193
+#define TK_INTO 194
+#define TK_VALUES 195
#define TK_SPACE 300
diff --git a/src/inc/ttype.h b/src/inc/ttype.h
index 662a23bfdbc52e432d2bcfbdac5c43cfba5d60dc..44e666106a7657691b0d97d259ccb7b61871b9a7 100644
--- a/src/inc/ttype.h
+++ b/src/inc/ttype.h
@@ -5,17 +5,33 @@
extern "C" {
#endif
+#include <math.h>
+#include <float.h>
#include "taosdef.h"
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
-typedef int32_t VarDataOffsetT;
-typedef int16_t VarDataLenT;
+typedef int32_t VarDataOffsetT;
+typedef int16_t VarDataLenT; // maxVarDataLen: 32767
+typedef uint16_t TDRowLenT; // not including overhead: 0 ~ 65535
+typedef uint32_t TDRowTLenT; // total length, including overhead
typedef struct tstr {
VarDataLenT len;
char data[];
} tstr;
+#pragma pack(push, 1)
+typedef struct {
+ VarDataLenT len;
+ uint8_t data;
+} SBinaryNullT;
+
+typedef struct {
+ VarDataLenT len;
+ uint32_t data;
+} SNCharNullT;
+#pragma pack(pop)
+
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
#define varDataLen(v) ((VarDataLenT *)(v))[0]
@@ -26,6 +42,10 @@ typedef struct tstr {
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len))
#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
+#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
+#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
+
+
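/* Editorial sketch (assumes <string.h>): laying out a variable-length BINARY
 * cell with the macros above. varDataNetLen()/varDataNetTLen() appear meant
 * for cells whose length field was stored in network byte order. */
static void demoFillBinaryCell(char *cell, const char *s, VarDataLenT len) {
  varDataSetLen(cell, len);                   /* 2-byte length header, host order */
  memcpy(cell + VARSTR_HEADER_SIZE, s, len);  /* payload immediately follows */
}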
// this data type is internally used only in 'in' query to hold the values
#define TSDB_DATA_TYPE_ARRAY (1000)
@@ -118,8 +138,10 @@ typedef struct tstr {
#define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) < UINT16_MAX)
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX)
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX)
+#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
+#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
-static FORCE_INLINE bool isNull(const char *val, int32_t type) {
+static FORCE_INLINE bool isNull(const void *val, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
@@ -171,10 +193,10 @@ extern tDataTypeDescriptor tDataTypes[15];
bool isValidDataType(int32_t type);
-void setVardataNull(char* val, int32_t type);
-void setNull(char *val, int32_t type, int32_t bytes);
-void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
-void *getNullValue(int32_t type);
+void setVardataNull(void* val, int32_t type);
+void setNull(void *val, int32_t type, int32_t bytes);
+void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems);
+const void *getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
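The ttype.h hunks above introduce packed null sentinels for variable-length columns: a BINARY null is a one-byte payload behind a VarDataLenT length prefix (SBinaryNullT), an NCHAR null a four-byte payload (SNCharNullT). A minimal standalone sketch of how such a length-prefixed value is written and inspected follows; the macros are re-declared locally to keep it self-contained, and FAKE_BINARY_NULL is a stand-in, not the real sentinel from taosdef.h.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int16_t VarDataLenT;            /* mirrors the typedef in ttype.h */
#define VARSTR_HEADER_SIZE  sizeof(VarDataLenT)
#define varDataLen(v)       (((VarDataLenT *)(v))[0])
#define varDataVal(v)       ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataSetLen(v, l) (((VarDataLenT *)(v))[0] = (VarDataLenT)(l))

#define FAKE_BINARY_NULL 0xFFu          /* stand-in; real value lives in taosdef.h */

int main(void) {
    char buf[VARSTR_HEADER_SIZE + 16];

    /* A regular BINARY value: length prefix followed by the raw bytes. */
    varDataSetLen(buf, 5);
    memcpy(varDataVal(buf), "hello", 5);
    printf("len=%d val=%.*s\n", varDataLen(buf), varDataLen(buf), varDataVal(buf));

    /* A null BINARY value: one sentinel byte, matching the SBinaryNullT layout. */
    varDataSetLen(buf, 1);
    *(uint8_t *)varDataVal(buf) = FAKE_BINARY_NULL;
    printf("null marker: len=%d byte=0x%02X\n",
           varDataLen(buf), *(uint8_t *)varDataVal(buf));
    return 0;
}
```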
diff --git a/src/inc/twal.h b/src/inc/twal.h
index bce398d6f95518379c1cd4c0d95a4f6324aab4d7..868a1fbd780232303b42e58185ffc00730c17546 100644
--- a/src/inc/twal.h
+++ b/src/inc/twal.h
@@ -32,7 +32,7 @@ typedef enum {
typedef struct {
int8_t msgType;
- int8_t sver;
+ int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility
int8_t reserved[2];
int32_t len;
uint64_t version;
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 576acb624a3076065cb1015430953fef44398612..b3291645c00be17283f7d078acb2d4c9a2629ece 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -49,7 +49,7 @@ typedef struct {
SRpcMsg rpcMsg;
SRspRet rspRet;
char reserveForSync[24];
- SWalHead pHead;
+ SWalHead walHead;
} SVWriteMsg;
// vnodeStatus
@@ -62,6 +62,7 @@ int32_t vnodeOpen(int32_t vgId);
int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg);
int32_t vnodeSync(int32_t vgId);
int32_t vnodeClose(int32_t vgId);
+int32_t vnodeCompact(int32_t vgId);
// vnodeMgmt
int32_t vnodeInitMgmt();
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 7053052007c5e00a5ac001d72b64029dc08ddf8b..fdf58d5ae1c21ebd8b2948114d9643d38dccae3e 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -4,3 +4,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdump)
+ADD_SUBDIRECTORY(taospack)
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index 794ca5e2de1820035524cc4180558b9f290c22c6..bf2bbca14d25aff3b3717c7b9785f1dc470a013a 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -19,9 +19,9 @@ ELSE ()
ENDIF ()
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(shell taos_static ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC})
ELSE ()
- TARGET_LINK_LIBRARIES(shell taos ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC})
ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h
index 2374150c529df5bcaefd848a00d9050cd8e43870..f207a866ddc712165340c06b026aa99081f91c81 100644
--- a/src/kit/shell/inc/shell.h
+++ b/src/kit/shell/inc/shell.h
@@ -25,7 +25,6 @@
#define MAX_USERNAME_SIZE 64
#define MAX_DBNAME_SIZE 64
#define MAX_IP_SIZE 20
-#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history"
@@ -56,6 +55,8 @@ typedef struct SShellArguments {
int abort;
int port;
int pktLen;
+ int pktNum;
+ char* pktType;
char* netTestRole;
} SShellArguments;
diff --git a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h
index 3094bdb9ddb1ccd9debdbca88a34197385deb367..6e4d3e382e3d7e8c50405c07da8ed73725230434 100644
--- a/src/kit/shell/inc/shellCommand.h
+++ b/src/kit/shell/inc/shellCommand.h
@@ -35,6 +35,8 @@ struct Command {
};
extern void backspaceChar(Command *cmd);
+extern void clearLineBefore(Command *cmd);
+extern void clearLineAfter(Command *cmd);
extern void deleteChar(Command *cmd);
extern void moveCursorLeft(Command *cmd);
extern void moveCursorRight(Command *cmd);
@@ -45,7 +47,7 @@ extern void updateBuffer(Command *cmd);
extern int isReadyGo(Command *cmd);
extern void resetCommand(Command *cmd, const char s[]);
-int countPrefixOnes(char c);
+int countPrefixOnes(unsigned char c);
void clearScreen(int ecmd_pos, int cursor_pos);
void printChar(char c, int times);
void positionCursor(int step, int direction);
diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c
index b88244ea018291fbdc98165a8665949f618e3291..7fc8b1409a7602df48108d0e7f4763da48ed6497 100644
--- a/src/kit/shell/src/shellCheck.c
+++ b/src/kit/shell/src/shellCheck.c
@@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
int32_t tbIndex = tbNum++;
if (tbMallocNum < tbNum) {
tbMallocNum = (tbMallocNum * 2 + 1);
- tbNames = realloc(tbNames, tbMallocNum * sizeof(char *));
- if (tbNames == NULL) {
+ char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
+ if (tbNames1 == NULL) {
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
break;
}
+ tbNames = tbNames1;
}
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);
@@ -104,6 +105,8 @@ static void shellFreeTbnames() {
static void *shellCheckThreadFp(void *arg) {
ShellThreadObj *pThread = (ShellThreadObj *)arg;
+ setThreadName("shellCheckThrd");
+
int32_t interval = tbNum / pThread->totalThreads + 1;
int32_t start = pThread->threadIndex * interval;
int32_t end = (pThread->threadIndex + 1) * interval;
@@ -142,21 +145,21 @@ static void *shellCheckThreadFp(void *arg) {
taos_free_result(pSql);
}
- fsync(fileno(fp));
+ taosFsync(fileno(fp));
fclose(fp);
return NULL;
}
-static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
+static void shellRunCheckThreads(TAOS *con, SShellArguments *_args) {
pthread_attr_t thattr;
- ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
- for (int t = 0; t < args->threadNum; ++t) {
+ ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
+ for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
- pThread->totalThreads = args->threadNum;
+ pThread->totalThreads = _args->threadNum;
pThread->taos = con;
- pThread->db = args->database;
+ pThread->db = _args->database;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -167,31 +170,31 @@ static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
}
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
-void shellCheck(TAOS *con, SShellArguments *args) {
+void shellCheck(TAOS *con, SShellArguments *_args) {
int64_t start = taosGetTimestampMs();
- if (shellUseDb(con, args->database) != 0) {
+ if (shellUseDb(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
- if (shellShowTables(con, args->database) != 0) {
+ if (shellShowTables(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
- fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, args->threadNum);
- shellRunCheckThreads(con, args);
+ fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, _args->threadNum);
+ shellRunCheckThreads(con, _args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "total %d tables checked, failed:%d, time spent %.2f seconds\n", checkedNum, errorNum,
diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c
index 9173ab0efdae7e5900218b2ab256993df71b21dd..67e0c949890728268afcaf67804dd20e10231ba4 100644
--- a/src/kit/shell/src/shellCommand.c
+++ b/src/kit/shell/src/shellCommand.c
@@ -26,7 +26,7 @@ typedef struct {
char widthOnScreen;
} UTFCodeInfo;
-int countPrefixOnes(char c) {
+int countPrefixOnes(unsigned char c) {
unsigned char mask = 127;
mask = ~mask;
int ret = 0;
@@ -48,7 +48,7 @@ void getPrevCharSize(const char *str, int pos, int *size, int *width) {
while (--pos >= 0) {
*size += 1;
- if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break;
+ if (str[pos] > 0 || countPrefixOnes((unsigned char )str[pos]) > 1) break;
}
int rc = mbtowc(&wc, str + pos, MB_CUR_MAX);
@@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) {
}
}
+void clearLineBefore(Command *cmd) {
+ assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+ memmove(cmd->command, cmd->command + cmd->cursorOffset,
+ cmd->commandSize - cmd->cursorOffset);
+ cmd->commandSize -= cmd->cursorOffset;
+ cmd->cursorOffset = 0;
+ cmd->screenOffset = 0;
+ cmd->endOffset = cmd->commandSize;
+ showOnScreen(cmd);
+}
+
+void clearLineAfter(Command *cmd) {
+ assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+
+ clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+ cmd->commandSize -= cmd->endOffset - cmd->cursorOffset;
+ cmd->endOffset = cmd->cursorOffset;
+ showOnScreen(cmd);
+}
+
void deleteChar(Command *cmd) {
assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
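Switching countPrefixOnes to take unsigned char matters because plain char is signed on most ABIs: UTF-8 lead and continuation bytes are >= 0x80, so passed as signed char they become negative and the mask arithmetic sign-extends. A standalone illustration of the leading-ones count the shell uses to size multibyte characters (the counting loop here is a simplified variant, not the exact body above):

```c
#include <stdio.h>

/* Count the leading 1-bits of a byte; for a UTF-8 lead byte this
 * equals the total byte length of the encoded character. */
static int countPrefixOnes(unsigned char c) {
    int n = 0;
    unsigned char mask = 0x80;
    while (mask && (c & mask)) { n++; mask >>= 1; }
    return n;
}

int main(void) {
    const char *s = "\xE4\xB8\xAD";  /* one CJK character in UTF-8 */
    /* The cast is required: on signed-char platforms s[0] is negative. */
    printf("lead byte prefix ones: %d\n", countPrefixOnes((unsigned char)s[0]));
    printf("continuation byte:     %d\n", countPrefixOnes((unsigned char)s[1]));
    return 0;
}
```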
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
index 31ad7046e9176221e10c79b3f2367ea464529438..a1413be1ce4ce6f67516fc09121115f30bbc56f0 100644
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ -64,6 +64,10 @@ void printHelp() {
exit(EXIT_SUCCESS);
}
+char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
+char g_password[SHELL_MAX_PASSWORD_LEN];
+
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
wordexp_t full_path;
for (int i = 1; i < argc; i++) {
@@ -77,10 +81,28 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
}
}
// for password
- else if (strcmp(argv[i], "-p") == 0) {
- arguments->is_use_passwd = true;
+ else if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ strcpy(tsOsName, "Darwin");
+ printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", g_password) > 1) {
+ fprintf(stderr, "password read error\n");
+ }
+ taosSetConsoleEcho(true);
+ getchar();
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ }
+ arguments->password = g_password;
+ arguments->is_use_passwd = true;
+ strcpy(argv[i], "");
+ argc -= 1;
}
- // for management port
+ // for management port
else if (strcmp(argv[i], "-P") == 0) {
if (i < argc - 1) {
arguments->port = atoi(argv[++i]);
@@ -98,7 +120,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
- if (i < argc - 1) {
+ if (i < argc - 1) {
if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
@@ -238,10 +260,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
+ case 11: // Ctrl + K;
+ clearLineAfter(&cmd);
+ break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
+ case 21: // Ctrl + U
+ clearLineBefore(&cmd);
+ break;
}
} else if (c == '\033') {
c = getchar();
@@ -336,6 +364,8 @@ void *shellLoopQuery(void *arg) {
TAOS *con = (TAOS *)arg;
+ setThreadName("shellLoopQuery");
+
pthread_cleanup_push(cleanup_handler, NULL);
char *command = malloc(MAX_COMMAND_SIZE);
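The new case 11 and case 21 branches above map raw terminal control codes to line-editing actions: in ASCII, Ctrl+<letter> arrives as the letter's code masked with 0x1F, so Ctrl+K is 11 and Ctrl+U is 21. A tiny sketch of that mapping, independent of the shell's Command machinery:

```c
#include <stdio.h>

#define CTRL(ch) ((ch) & 0x1F)   /* ASCII control-key encoding */

static void dispatch(int c) {
    switch (c) {
    case CTRL('K'): puts("clear from cursor to end of line");   break; /* 11 */
    case CTRL('L'): puts("redraw screen");                      break; /* 12 */
    case CTRL('U'): puts("clear from start of line to cursor"); break; /* 21 */
    default:        printf("ordinary byte %d\n", c);            break;
    }
}

int main(void) {
    dispatch(11);
    dispatch(21);
    dispatch('a');
    return 0;
}
```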
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index f670d05f55d5012707379fafce02d15ee718b28f..ef3a2458a07ba7ab3ad516566e1a38a32526146b 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -37,6 +37,13 @@ char PROMPT_HEADER[] = "power> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 7;
+#elif (_TD_TQ_ == true)
+char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "tq> ";
+
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 4;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
@@ -56,24 +63,30 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
/*
* FUNCTION: Initialize the shell.
*/
-TAOS *shellInit(SShellArguments *args) {
+TAOS *shellInit(SShellArguments *_args) {
printf("\n");
- printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
+ if (!_args->is_use_passwd) {
+#ifdef TD_WINDOWS
+ strcpy(tsOsName, "Windows");
+#elif defined(TD_DARWIN)
+ strcpy(tsOsName, "Darwin");
+#endif
+ printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
+ }
+
fflush(stdout);
// set options before initializing
- if (args->timezone != NULL) {
- taos_options(TSDB_OPTION_TIMEZONE, args->timezone);
+ if (_args->timezone != NULL) {
+ taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
- if (args->is_use_passwd) {
- if (args->password == NULL) args->password = getpass("Enter password: ");
- } else {
- args->password = TSDB_DEFAULT_PASS;
+ if (!_args->is_use_passwd) {
+ _args->password = TSDB_DEFAULT_PASS;
}
- if (args->user == NULL) {
- args->user = TSDB_DEFAULT_USER;
+ if (_args->user == NULL) {
+ _args->user = TSDB_DEFAULT_USER;
}
if (taos_init()) {
@@ -84,10 +97,10 @@ TAOS *shellInit(SShellArguments *args) {
// Connect to the database.
TAOS *con = NULL;
- if (args->auth == NULL) {
- con = taos_connect(args->host, args->user, args->password, args->database, args->port);
+ if (_args->auth == NULL) {
+ con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port);
} else {
- con = taos_connect_auth(args->host, args->user, args->auth, args->database, args->port);
+ con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port);
}
if (con == NULL) {
@@ -100,14 +113,14 @@ TAOS *shellInit(SShellArguments *args) {
read_history();
// Check if it is temperory run
- if (args->commands != NULL || args->file[0] != 0) {
- if (args->commands != NULL) {
- printf("%s%s\n", PROMPT_HEADER, args->commands);
- shellRunCommand(con, args->commands);
+ if (_args->commands != NULL || _args->file[0] != 0) {
+ if (_args->commands != NULL) {
+ printf("%s%s\n", PROMPT_HEADER, _args->commands);
+ shellRunCommand(con, _args->commands);
}
- if (args->file[0] != 0) {
- source_file(con, args->file);
+ if (_args->file[0] != 0) {
+ source_file(con, _args->file);
}
taos_close(con);
@@ -116,14 +129,14 @@ TAOS *shellInit(SShellArguments *args) {
}
#ifndef WINDOWS
- if (args->dir[0] != 0) {
- source_dir(con, args);
+ if (_args->dir[0] != 0) {
+ source_dir(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
- if (args->check != 0) {
- shellCheck(con, args);
+ if (_args->check != 0) {
+ shellCheck(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
@@ -163,7 +176,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) {
system("clear");
return 0;
}
-
+
if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
strtok(command, " \t");
strtok(NULL, " \t");
@@ -175,7 +188,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) {
}
return 0;
}
-
+
if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
/* If source file. */
char *c_ptr = strtok(command, " ;");
@@ -240,10 +253,14 @@ int32_t shellRunCommand(TAOS* con, char* command) {
esc = false;
continue;
}
-
+
if (c == '\\') {
- esc = true;
- continue;
+ if (quote != 0 && (*command == '_' || *command == '\\')) {
+ //DO nothing
+ } else {
+ esc = true;
+ continue;
+ }
}
if (quote == c) {
@@ -329,8 +346,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
}
if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands
- int error_no = 0;
-
+ int error_no = 0;
+
int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode);
if (numOfRows < 0) {
atomic_store_64(&result, 0);
@@ -398,7 +415,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
int32_t ms = 0;
- if (precision == TSDB_TIME_PRECISION_MICRO) {
+ if (precision == TSDB_TIME_PRECISION_NANO) {
+ tt = (time_t)(val / 1000000000);
+ ms = val % 1000000000;
+ } else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
ms = val % 1000000;
} else {
@@ -419,7 +439,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
#endif
if (tt <= 0 && ms < 0) {
tt--;
- if (precision == TSDB_TIME_PRECISION_MICRO) {
+ if (precision == TSDB_TIME_PRECISION_NANO) {
+ ms += 1000000000;
+ } else if (precision == TSDB_TIME_PRECISION_MICRO) {
ms += 1000000;
} else {
ms += 1000;
@@ -427,9 +449,11 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
struct tm* ptm = localtime(&tt);
- size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
+ size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm);
- if (precision == TSDB_TIME_PRECISION_MICRO) {
+ if (precision == TSDB_TIME_PRECISION_NANO) {
+ sprintf(buf + pos, ".%09d", ms);
+ } else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", ms);
} else {
sprintf(buf + pos, ".%03d", ms);
@@ -516,7 +540,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) {
fprintf(fp, "%s", fields[col].name);
}
fputc('\n', fp);
-
+
int numOfRows = 0;
do {
int32_t* length = taos_fetch_lengths(tres);
@@ -702,7 +726,7 @@ static int verticalPrintResult(TAOS_RES* tres) {
int numOfRows = 0;
int showMore = 1;
- do {
+ do {
if (numOfRows < resShowMaxNum) {
printf("*************************** %d.row ***************************\n", numOfRows + 1);
@@ -778,6 +802,8 @@ static int calcColWidth(TAOS_FIELD* field, int precision) {
case TSDB_DATA_TYPE_TIMESTAMP:
if (args.is_raw_time) {
return MAX(14, width);
+ } if (precision == TSDB_TIME_PRECISION_NANO) {
+ return MAX(29, width);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
return MAX(26, width); // '2020-01-01 00:00:00.000000'
} else {
@@ -835,7 +861,7 @@ static int horizontalPrintResult(TAOS_RES* tres) {
int numOfRows = 0;
int showMore = 1;
-
+
do {
int32_t* length = taos_fetch_lengths(tres);
if (numOfRows < resShowMaxNum) {
@@ -851,7 +877,7 @@ static int horizontalPrintResult(TAOS_RES* tres) {
printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n");
showMore = 0;
}
-
+
numOfRows++;
row = taos_fetch_row(tres);
} while(row != NULL);
@@ -893,7 +919,7 @@ void read_history() {
if (errno != ENOENT) {
fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
}
-#endif
+#endif
return;
}
@@ -918,9 +944,9 @@ void write_history() {
FILE *f = fopen(f_history, "w");
if (f == NULL) {
-#ifndef WINDOWS
+#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
-#endif
+#endif
return;
}
@@ -966,13 +992,13 @@ void source_file(TAOS *con, char *fptr) {
/*
if (access(fname, F_OK) != 0) {
fprintf(stderr, "ERROR: file %s is not exist\n", fptr);
-
+
wordfree(&full_path);
free(cmd);
return;
}
*/
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c
index af61995c618bc80389b8abf6d8c6f6c929327925..222d69e854933095ec0aadaa8a67bf1c19954c3b 100644
--- a/src/kit/shell/src/shellImport.c
+++ b/src/kit/shell/src/shellImport.c
@@ -223,6 +223,8 @@ static void shellSourceFile(TAOS *con, char *fptr) {
void* shellImportThreadFp(void *arg)
{
ShellThreadObj *pThread = (ShellThreadObj*)arg;
+ setThreadName("shellImportThrd");
+
for (int f = 0; f < shellSQLFileNum; ++f) {
if (f % pThread->totalThreads == pThread->threadIndex) {
char *SQLFileName = shellSQLFiles[f];
@@ -233,15 +235,15 @@ void* shellImportThreadFp(void *arg)
return NULL;
}
-static void shellRunImportThreads(SShellArguments* args)
+static void shellRunImportThreads(SShellArguments* _args)
{
pthread_attr_t thattr;
- ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
- for (int t = 0; t < args->threadNum; ++t) {
+ ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
+ for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
- pThread->totalThreads = args->threadNum;
- pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsDnodeShellPort);
+ pThread->totalThreads = _args->threadNum;
+ pThread->taos = taos_connect(_args->host, _args->user, _args->password, _args->database, tsDnodeShellPort);
if (pThread->taos == NULL) {
fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, "null taos"/*taos_errstr(pThread->taos)*/);
exit(0);
@@ -256,18 +258,18 @@ static void shellRunImportThreads(SShellArguments* args)
}
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
- for (int t = 0; t < args->threadNum; ++t) {
+ for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
-void source_dir(TAOS* con, SShellArguments* args) {
- shellGetDirectoryFileList(args->dir);
+void source_dir(TAOS* con, SShellArguments* _args) {
+ shellGetDirectoryFileList(_args->dir);
int64_t start = taosGetTimestampMs();
if (shellTablesSQLFile[0] != 0) {
@@ -276,7 +278,7 @@ void source_dir(TAOS* con, SShellArguments* args) {
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", shellTablesSQLFile, (end - start) / 1000.0);
}
- shellRunImportThreads(args);
+ shellRunImportThreads(_args);
int64_t end = taosGetTimestampMs();
- fprintf(stdout, "import %s finished, time spent %.2f seconds\n", args->dir, (end - start) / 1000.0);
+ fprintf(stdout, "import %s finished, time spent %.2f seconds\n", _args->dir, (end - start) / 1000.0);
}
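The setThreadName calls added to the worker entry points (shellImportThreadFp, shellCheckThreadFp, shellLoopQuery) make threads identifiable in tools such as top -H and gdb. That helper is TDengine-internal; on Linux the same effect comes from prctl(PR_SET_NAME) issued by the thread itself, as this sketch assumes (kernel thread names are truncated to 15 characters plus NUL):

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/prctl.h>

static void *worker(void *arg) {
    (void)arg;
    prctl(PR_SET_NAME, "shellImportThrd", 0, 0, 0);  /* visible in top -H, /proc */
    char name[16] = {0};
    prctl(PR_GET_NAME, name, 0, 0, 0);
    printf("thread named: %s\n", name);
    return NULL;
}

int main(void) {
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);
    pthread_join(tid, NULL);
    return 0;
}
```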
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index 3f6b3da9bf4b1f80f58a06613e8571ca16892c46..93783b205560604c9d25c9f5dc2e73a239a67b8e 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -34,10 +34,10 @@ static char doc[] = "";
static char args_doc[] = "";
static struct argp_option options[] = {
{"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."},
- {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
+ {"password", 'p', 0, 0, "The password to use when connecting to the server."},
{"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."},
{"user", 'u', "USER", 0, "The user name to use when connecting to the server."},
- {"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."},
+ {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."},
{"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."},
{"dump-config", 'C', 0, 0, "Dump configuration."},
{"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."},
@@ -47,9 +47,11 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"check", 'k', "CHECK", 0, "Check tables."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
- {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
- {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync."},
+ {"timezone", 'z', "TIMEZONE", 0, "Time zone of the shell, default is local."},
+ {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speen|fqdn."},
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
+ {"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."},
+ {"pkttype", 'S', "PKTTYPE", 0, "Packet type used for net test, default is TCP."},
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@@ -63,8 +65,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->host = arg;
break;
case 'p':
- arguments->is_use_passwd = true;
- if (arg) arguments->password = arg;
break;
case 'P':
if (arg) {
@@ -76,7 +76,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
}
break;
- case 't':
+ case 'z':
arguments->timezone = arg;
break;
case 'u':
@@ -108,7 +108,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->is_raw_time = true;
break;
case 'f':
- if (wordexp(arg, &full_path, 0) != 0) {
+ if ((0 == strlen(arg)) || (wordexp(arg, &full_path, 0) != 0)) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
}
@@ -148,6 +148,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return -1;
}
break;
+ case 'N':
+ if (arg) {
+ arguments->pktNum = atoi(arg);
+ } else {
+ fprintf(stderr, "Invalid packet number\n");
+ return -1;
+ }
+ break;
+ case 'S':
+ arguments->pktType = arg;
+ break;
case OPT_ABORT:
arguments->abort = 1;
break;
@@ -160,12 +171,48 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
/* Our argp parser. */
static struct argp argp = {options, parse_opt, args_doc, doc};
+char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
+char g_password[SHELL_MAX_PASSWORD_LEN];
+
+static void parse_args(
+ int argc, char *argv[], SShellArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ strcpy(tsOsName, "Linux");
+ printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%20s", g_password) > 1) {
+ fprintf(stderr, "password reading error\n");
+ }
+ taosSetConsoleEcho(true);
+ if (EOF == getchar()) {
+ fprintf(stderr, "getchar() return EOF\n");
+ }
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ strcpy(argv[i], "-p");
+ }
+ arguments->password = g_password;
+ arguments->is_use_passwd = true;
+ }
+ }
+}
+
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
static char verType[32] = {0};
sprintf(verType, "version: %s\n", version);
argp_program_version = verType;
-
+
+ if (argc > 1) {
+ parse_args(argc, argv, arguments);
+ }
+
argp_parse(&argp, argc, argv, 0, 0, arguments);
if (arguments->abort) {
#ifndef _ALPINE
@@ -238,10 +285,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
updateBuffer(&cmd);
}
break;
+ case 11: // Ctrl + K;
+ clearLineAfter(&cmd);
+ break;
case 12: // Ctrl + L;
system("clear");
showOnScreen(&cmd);
break;
+ case 21: // Ctrl + U;
+ clearLineBefore(&cmd);
+ break;
}
} else if (c == '\033') {
c = (char)getchar();
@@ -336,6 +389,8 @@ void *shellLoopQuery(void *arg) {
TAOS *con = (TAOS *)arg;
+ setThreadName("shellLoopQuery");
+
pthread_cleanup_push(cleanup_handler, NULL);
char *command = malloc(MAX_COMMAND_SIZE);
@@ -415,7 +470,7 @@ void set_terminal_mode() {
}
}
-void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
+void get_history_path(char *_history) { snprintf(_history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w;
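parse_args above scans argv for -p/--password before argp runs, so a bare -p can prompt interactively with echo disabled via taosSetConsoleEcho. That helper is internal to TDengine; a portable sketch of the same echo-off prompt using termios directly:

```c
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

#define PASS_MAX 20

/* Prompt for a password with terminal echo turned off. */
static void readPassword(char *buf, size_t sz) {
    struct termios oldt, newt;
    tcgetattr(STDIN_FILENO, &oldt);
    newt = oldt;
    newt.c_lflag &= ~(tcflag_t)ECHO;          /* disable echo */
    tcsetattr(STDIN_FILENO, TCSANOW, &newt);

    printf("Enter password: ");
    fflush(stdout);
    if (fgets(buf, (int)sz, stdin) != NULL)
        buf[strcspn(buf, "\n")] = '\0';       /* strip trailing newline */

    tcsetattr(STDIN_FILENO, TCSANOW, &oldt);  /* restore echo */
    printf("\n");
}

int main(void) {
    char pass[PASS_MAX + 1];
    readPassword(pass, sizeof(pass));
    printf("read %zu characters\n", strlen(pass));
    return 0;
}
```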
diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c
index 4c7e550760cecb7c045cb8c94fc431cb5f91812b..5c9dc0995dacecebd10b7f2b77e216ca97157db0 100644
--- a/src/kit/shell/src/shellMain.c
+++ b/src/kit/shell/src/shellMain.c
@@ -26,6 +26,8 @@ void shellQueryInterruptHandler(int32_t signum, void *sigInfo, void *context) {
}
void *cancelHandler(void *arg) {
+ setThreadName("cancelHandler");
+
while(1) {
if (tsem_wait(&cancelSem) != 0) {
taosMsleep(10);
@@ -69,7 +71,9 @@ int checkVersion() {
// Global configurations
SShellArguments args = {
.host = NULL,
+#ifndef TD_WINDOWS
.password = NULL,
+#endif
.user = NULL,
.database = NULL,
.timezone = NULL,
@@ -81,6 +85,8 @@ SShellArguments args = {
.threadNum = 5,
.commands = NULL,
.pktLen = 1000,
+ .pktNum = 100,
+ .pktType = "TCP",
.netTestRole = NULL
};
@@ -114,7 +120,7 @@ int main(int argc, char* argv[]) {
printf("Failed to init taos");
exit(EXIT_FAILURE);
}
- taosNetTest(args.netTestRole, args.host, args.port, args.pktLen);
+ taosNetTest(args.netTestRole, args.host, args.port, args.pktLen, args.pktNum, args.pktType);
exit(0);
}
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index 87d11a3516a65e83201bf4ebe51f07e5394d5cdf..abec34b84c5ff65d6cb13492028cd36321d2d0ca 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -19,6 +19,9 @@
extern char configDir[];
+char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
+
void printVersion() {
printf("version: %s\n", version);
}
@@ -52,15 +55,21 @@ void printHelp() {
printf("%s%s\n", indent, "-t");
printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local.");
printf("%s%s\n", indent, "-n");
- printf("%s%s%s\n", indent, indent, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync.");
+ printf("%s%s%s\n", indent, indent, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speed|fqdn.");
printf("%s%s\n", indent, "-l");
printf("%s%s%s\n", indent, indent, "Packet length used for net test, default is 1000 bytes.");
+ printf("%s%s\n", indent, "-N");
+ printf("%s%s%s\n", indent, indent, "Packet numbers used for net test, default is 100.");
+ printf("%s%s\n", indent, "-S");
+ printf("%s%s%s\n", indent, indent, "Packet type used for net test, default is TCP.");
printf("%s%s\n", indent, "-V");
printf("%s%s%s\n", indent, indent, "Print program version.");
exit(EXIT_SUCCESS);
}
+char g_password[SHELL_MAX_PASSWORD_LEN];
+
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) {
// for host
@@ -73,11 +82,26 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
}
}
// for password
- else if (strcmp(argv[i], "-p") == 0) {
- arguments->is_use_passwd = true;
- if (i < argc - 1 && argv[i + 1][0] != '-') {
- arguments->password = argv[++i];
- }
+ else if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ arguments->is_use_passwd = true;
+ strcpy(tsOsName, "Windows");
+ printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", g_password) > 1) {
+ fprintf(stderr, "password read error!\n");
+ }
+ taosSetConsoleEcho(true);
+ getchar();
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ }
+ arguments->password = g_password;
+ strcpy(argv[i], "");
+ argc -= 1;
}
// for management port
else if (strcmp(argv[i], "-P") == 0) {
@@ -104,7 +128,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
- if (i < argc - 1) {
+ if (i < argc - 1) {
char *tmp = argv[++i];
if (strlen(tmp) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1);
@@ -265,7 +289,7 @@ void *shellLoopQuery(void *arg) {
if (command == NULL) return NULL;
int32_t err = 0;
-
+
do {
memset(command, 0, MAX_COMMAND_SIZE);
shellPrintPrompt();
@@ -274,7 +298,7 @@ void *shellLoopQuery(void *arg) {
err = shellReadCommand(con, command);
if (err) {
break;
- }
+ }
} while (shellRunCommand(con, command) == 0);
return NULL;
diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt
index 584de340947035457abd985ac93697ed51c305af..2034093ad5841c267b722930681127d745d27153 100644
--- a/src/kit/taosdemo/CMakeLists.txt
+++ b/src/kit/taosdemo/CMakeLists.txt
@@ -67,7 +67,7 @@ IF (TD_LINUX)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static cJson ${LINK_JEMALLOC})
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC})
ELSE ()
TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC})
ENDIF ()
@@ -76,9 +76,9 @@ ELSEIF (TD_WINDOWS)
ADD_EXECUTABLE(taosdemo ${SRC})
SET_SOURCE_FILES_PROPERTIES(./taosdemo.c PROPERTIES COMPILE_FLAGS -w)
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos cJson)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF ()
ELSEIF (TD_DARWIN)
# missing a few dependencies, such as
@@ -86,9 +86,9 @@ ELSEIF (TD_DARWIN)
ADD_EXECUTABLE(taosdemo ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdemo taos_static cJson)
+ TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdemo taos cJson)
+ TARGET_LINK_LIBRARIES(taosdemo taos cJson lua)
ENDIF ()
ENDIF ()
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 84e65c7321a7bccc106f630497f2f5df420e9ce5..ae9289e3b0982ef950f5f67674c3b91ae41d0895 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <taos.h>
+#include <taoserror.h>
#define _GNU_SOURCE
#define CURL_STATICLIB
@@ -53,14 +54,6 @@
#include "taoserror.h"
#include "tutil.h"
-#define STMT_IFACE_ENABLED 0
-#define NANO_SECOND_ENABLED 0
-#define SET_THREADNAME_ENABLED 0
-
-#if SET_THREADNAME_ENABLED == 0
-#define setThreadName(name)
-#endif
-
#define REQ_EXTRA_BUF_LEN 1024
#define RESP_BUF_LEN 4096
@@ -77,13 +70,14 @@ extern char configDir[];
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
#define MAX_USERNAME_SIZE 64
-#define MAX_PASSWORD_SIZE 64
#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
+#define DEFAULT_START_TIME 1500000000000
+
#define MAX_PREPARED_RAND 1000000
#define INT_BUFF_LEN 11
#define BIGINT_BUFF_LEN 21
@@ -94,7 +88,7 @@ extern char configDir[];
#define DOUBLE_BUFF_LEN 42
#define TIMESTAMP_BUFF_LEN 21
-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_SAMPLES 10000
#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
#define MAX_DB_COUNT 8
@@ -111,8 +105,19 @@ extern char configDir[];
#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
#define DEFAULT_TIMESTAMP_STEP 1
+#define DEFAULT_INTERLACE_ROWS 0
+#define DEFAULT_DATATYPE_NUM 1
+#define DEFAULT_CHILDTABLES 10000
+#define STMT_BIND_PARAM_BATCH 0
+
+char* g_sampleDataBuf = NULL;
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+char* g_sampleBindBatchArray = NULL;
+#endif
+
enum TEST_MODE {
INSERT_TEST, // 0
QUERY_TEST, // 1
@@ -120,17 +125,17 @@ enum TEST_MODE {
INVAID_TEST
};
-typedef enum CREATE_SUB_TALBE_MOD_EN {
+typedef enum CREATE_SUB_TABLE_MOD_EN {
PRE_CREATE_SUBTBL,
AUTO_CREATE_SUBTBL,
NO_CREATE_SUBTBL
-} CREATE_SUB_TALBE_MOD_EN;
+} CREATE_SUB_TABLE_MOD_EN;
-typedef enum TALBE_EXISTS_EN {
+typedef enum TABLE_EXISTS_EN {
TBL_NO_EXISTS,
TBL_ALREADY_EXISTS,
TBL_EXISTS_BUTT
-} TALBE_EXISTS_EN;
+} TABLE_EXISTS_EN;
enum enumSYNC_MODE {
SYNC_MODE,
@@ -210,13 +215,13 @@ enum _describe_table_index {
static char *g_dupstr = NULL;
typedef struct SArguments_S {
- char * metaFile;
+ char *metaFile;
uint32_t test_mode;
- char * host;
+ char *host;
uint16_t port;
uint16_t iface;
char * user;
- char * password;
+ char password[SHELL_MAX_PASSWORD_LEN];
char * database;
int replica;
char * tb_prefix;
@@ -230,37 +235,39 @@ typedef struct SArguments_S {
bool performance_print;
char * output_file;
bool async_mode;
- char * datatype[MAX_NUM_COLUMNS + 1];
- uint32_t len_of_binary;
- uint32_t num_of_CPR;
- uint32_t num_of_threads;
+ char data_type[MAX_NUM_COLUMNS+1];
+ char *dataType[MAX_NUM_COLUMNS+1];
+ uint32_t binwidth;
+ uint32_t columnCount;
+ uint64_t lenOfOneRow;
+ uint32_t nthreads;
uint64_t insert_interval;
uint64_t timestamp_step;
int64_t query_times;
- uint32_t interlace_rows;
- uint32_t num_of_RPR; // num_of_records_per_req
+ uint32_t interlaceRows;
+ uint32_t reqPerReq; // num_of_records_per_req
uint64_t max_sql_len;
- int64_t num_of_tables;
- int64_t num_of_DPT;
+ int64_t ntables;
+ int64_t insertRows;
int abort;
uint32_t disorderRatio; // 0: no disorder, >0: x%
- int disorderRange; // ms, us or ns. accordig to database precision
+ int disorderRange; // ms, us or ns. according to database precision
uint32_t method_of_delete;
- char ** arg_list;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
bool demo_mode; // use default column name and semi-random data
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN];
- char dataType[DATATYPE_BUFF_LEN];
- uint32_t dataLen;
- char note[NOTE_BUFF_LEN];
+ char field[TSDB_COL_NAME_LEN];
+ char data_type;
+ char dataType[DATATYPE_BUFF_LEN];
+ uint32_t dataLen;
+ char note[NOTE_BUFF_LEN];
} StrColumn;
typedef struct SSuperTable_S {
- char sTblName[TSDB_TABLE_NAME_LEN];
+ char stbName[TSDB_TABLE_NAME_LEN];
char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample
char childTblPrefix[TBNAME_PREFIX_LEN];
uint16_t childTblExists;
@@ -296,14 +303,16 @@ typedef struct SSuperTable_S {
uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
- //int sampleRowCount;
- //int sampleUsePos;
uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
uint32_t tagSampleCount;
uint32_t tagUsePos;
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+ char *sampleBindBatchArray;
+#endif
// statistics
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
@@ -364,7 +373,7 @@ typedef struct SDbs_S {
uint16_t port;
char user[MAX_USERNAME_SIZE];
- char password[MAX_PASSWORD_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
bool insert_only;
@@ -372,7 +381,7 @@ typedef struct SDbs_S {
bool asyncMode;
uint32_t threadCount;
- uint32_t threadCountByCreateTbl;
+ uint32_t threadCountForCreateTbl;
uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
@@ -383,7 +392,7 @@ typedef struct SDbs_S {
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t concurrent;
int sqlCount;
uint32_t asyncMode; // 0: sync, 1: async
@@ -403,8 +412,8 @@ typedef struct SpecifiedQueryInfo_S {
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
- char sTblName[TSDB_TABLE_NAME_LEN];
- uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ char stbName[TSDB_TABLE_NAME_LEN];
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
@@ -430,7 +439,7 @@ typedef struct SQueryMetaInfo_S {
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
- char password[MAX_PASSWORD_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
char dbName[TSDB_DB_NAME_LEN];
char queryMode[SMALL_BUFF_LEN]; // taosc, rest
@@ -442,6 +451,16 @@ typedef struct SQueryMetaInfo_S {
typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT *stmt;
+ int64_t *bind_ts;
+
+#if STMT_BIND_PARAM_BATCH == 1
+ int64_t *bind_ts_array;
+ char *bindParams;
+ char *is_null;
+#else
+ char* sampleBindArray;
+#endif
+
int threadID;
char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
@@ -451,11 +470,12 @@ typedef struct SThreadInfo_S {
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
+ int64_t tables_created;
uint64_t data_of_rate;
int64_t start_time;
char* cols;
bool use_metric;
- SSuperTable* superTblInfo;
+ SSuperTable* stbInfo;
char *buffer; // sql cmd buffer
// for async insert
@@ -586,27 +606,25 @@ char *g_rand_current_buff = NULL;
char *g_rand_phase_buff = NULL;
char *g_randdouble_buff = NULL;
-char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
- "max(col0)", "min(col0)", "first(col0)", "last(col0)"};
-
-#define DEFAULT_DATATYPE_NUM 3
+char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
+ "max(C0)", "min(C0)", "first(C0)", "last(C0)"};
SArguments g_args = {
- NULL, // metaFile
- 0, // test_mode
- "127.0.0.1", // host
- 6030, // port
- INTERFACE_BUT, // iface
- "root", // user
+ NULL, // metaFile
+ 0, // test_mode
+ "localhost", // host
+ 6030, // port
+ INTERFACE_BUT, // iface
+ "root", // user
#ifdef _TD_POWER_
"powerdb", // password
#elif (_TD_TQ_ == true)
- "tqueue", // password
+ "tqueue", // password
#else
- "taosdata", // password
+ "taosdata", // password
#endif
- "test", // database
- 1, // replica
+ "test", // database
+ 1, // replica
"d", // tb_prefix
NULL, // sqlFile
true, // use_metric
@@ -618,36 +636,38 @@ SArguments g_args = {
false, // answer_yes;
"./output.txt", // output_file
0, // mode : sync or async
+ {TSDB_DATA_TYPE_FLOAT,
+ TSDB_DATA_TYPE_INT,
+ TSDB_DATA_TYPE_FLOAT},
{
- "FLOAT", // datatype
- "INT", // datatype
- "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3
+ "FLOAT", // dataType
+ "INT", // dataType
+ "FLOAT", // dataType. demo mode has 3 columns
},
- 16, // len_of_binary
- 4, // num_of_CPR
- 10, // num_of_connections/thread
+ 64, // binwidth
+ 4, // columnCount, timestamp + float + int + float
+ 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow
+ 8, // num_of_connections/thread
0, // insert_interval
DEFAULT_TIMESTAMP_STEP, // timestamp_step
1, // query_times
- 0, // interlace_rows;
- 30000, // num_of_RPR
+ DEFAULT_INTERLACE_ROWS, // interlaceRows;
+ 30000, // reqPerReq
(1024*1024), // max_sql_len
- 10000, // num_of_tables
- 10000, // num_of_DPT
+ DEFAULT_CHILDTABLES, // ntables
+ 10000, // insertRows
0, // abort
0, // disorderRatio
1000, // disorderRange
1, // method_of_delete
- NULL, // arg_list
0, // totalInsertRows;
0, // totalAffectedRows;
true, // demo_mode;
};
-
-
static SDbs g_Dbs;
-static int64_t g_totalChildTables = 0;
+static int64_t g_totalChildTables = DEFAULT_CHILDTABLES;
+static int64_t g_actualChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
@@ -668,7 +688,28 @@ static FILE * g_fpOfInsertResult = NULL;
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
- do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)
+ do {\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\
+ fprintf(stderr, " \033[0m");\
+ } while(0)
+
+#define errorPrint2(fmt, ...) \
+ do {\
+ struct tm Tm, *ptm;\
+ struct timeval timeSecs; \
+ time_t curTime;\
+ gettimeofday(&timeSecs, NULL); \
+ curTime = timeSecs.tv_sec;\
+ ptm = localtime_r(&curTime, &Tm);\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\
+ ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\
+ ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\
+ taosGetSelfPthreadId());\
+ fprintf(stderr, " \033[0m");\
+ errorPrint(fmt, __VA_ARGS__);\
+ } while(0)
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
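errorPrint2 above decorates errorPrint with a wall-clock prefix built from gettimeofday and localtime_r plus a thread id; taosGetSelfPthreadId is TDengine-internal. A self-contained variant using pthread_self for the id shows what a log line carries (treating pthread_t as an integer is a Linux/glibc assumption):

```c
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

#define errorPrint(fmt, ...) \
    fprintf(stderr, "ERROR: " fmt, __VA_ARGS__)

/* Prefix the error with MM/DD hh:mm:ss.usec and a thread id. */
#define errorPrint2(fmt, ...)                                          \
    do {                                                               \
        struct timeval tv;                                             \
        struct tm tmv;                                                 \
        gettimeofday(&tv, NULL);                                       \
        time_t t = tv.tv_sec;                                          \
        localtime_r(&t, &tmv);                                         \
        fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %" PRIu64 " ",  \
                tmv.tm_mon + 1, tmv.tm_mday, tmv.tm_hour, tmv.tm_min,  \
                tmv.tm_sec, (int)tv.tv_usec,                           \
                (uint64_t)pthread_self());                             \
        errorPrint(fmt, __VA_ARGS__);                                  \
    } while (0)

int main(void) {
    errorPrint2("demo failure, code %d\n", 42);
    return 0;
}
```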
@@ -676,7 +717,7 @@ static FILE * g_fpOfInsertResult = NULL;
///////////////////////////////////////////////////
-static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); }
+static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); }
#ifndef TAOSDEMO_COMMIT_SHA1
#define TAOSDEMO_COMMIT_SHA1 "unknown"
@@ -696,101 +737,108 @@ static void printVersion() {
char taosdemo_status[] = TAOSDEMO_STATUS;
if (strlen(taosdemo_status) == 0) {
- printf("taosdemo verison %s-%s\n",
+ printf("taosdemo version %s-%s\n",
tdengine_ver, taosdemo_ver);
} else {
- printf("taosdemo verison %s-%s, status:%s\n",
+ printf("taosdemo version %s-%s, status:%s\n",
tdengine_ver, taosdemo_ver, taosdemo_status);
}
}
static void printHelp() {
- char indent[10] = " ";
- printf("%s%s%s%s\n", indent, "-f", indent,
+ char indent[10] = " ";
+ printf("%s\n\n", "Usage: taosdemo [OPTION...]");
+ printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t",
"The meta file to the execution procedure. Default is './meta.json'.");
- printf("%s%s%s%s\n", indent, "-u", indent,
- "The TDengine user name to use when connecting to the server. Default is 'root'.");
+ printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t",
+ "The user name to use when connecting to the server.");
#ifdef _TD_POWER_
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The password to use when connecting to the server. Default is 'powerdb'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server. Default is 'powerdb'");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
"Configuration directory. Default is '/etc/power/'.");
#elif (_TD_TQ_ == true)
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The password to use when connecting to the server. Default is 'tqueue'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server. Default is 'tqueue'");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
"Configuration directory. Default is '/etc/tq/'.");
#else
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The password to use when connecting to the server. Default is 'taosdata'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/taos/'.");
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server.");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory.");
#endif
- printf("%s%s%s%s\n", indent, "-h", indent,
- "The host to connect to TDengine. Default is localhost.");
- printf("%s%s%s%s\n", indent, "-p", indent,
- "The TCP/IP port number to use for the connection. Default is 0.");
- printf("%s%s%s%s\n", indent, "-I", indent,
-#if STMT_IFACE_ENABLED == 1
+ printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t",
+ "TDengine server FQDN to connect. The default host is localhost.");
+ printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t",
+ "The TCP/IP port number to use for the connection.");
+ printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t",
"The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
-#else
- "The interface (taosc, rest) taosdemo uses. Default is 'taosc'.");
-#endif
- printf("%s%s%s%s\n", indent, "-d", indent,
+ printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t",
"Destination database. Default is 'test'.");
- printf("%s%s%s%s\n", indent, "-a", indent,
+ printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t",
"Set the replica parameters of the database, Default 1, min: 1, max: 3.");
- printf("%s%s%s%s\n", indent, "-m", indent,
+ printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t",
"Table prefix name. Default is 'd'.");
- printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file.");
- printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag.");
- printf("%s%s%s%s\n", indent, "-o", indent,
+ printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t",
+ "The select sql file.");
+ printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag.");
+ printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t",
"Direct output to the named file. Default is './output.txt'.");
- printf("%s%s%s%s\n", indent, "-q", indent,
+ printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t",
"Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
- printf("%s%s%s%s\n", indent, "-b", indent,
+ printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t",
"The data_type of columns, default: FLOAT, INT, FLOAT.");
- printf("%s%s%s%s\n", indent, "-w", indent,
- "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
- printf("%s%s%s%s%d%s%d\n", indent, "-l", indent,
- "The number of columns per record. Default is ",
+ printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t",
+ "The width of data_type 'BINARY' or 'NCHAR'. Default is ",
+ g_args.binwidth);
+ printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t",
+ "The number of columns per record. Demo mode by default is ",
DEFAULT_DATATYPE_NUM,
- ". Max values is ",
+ " (float, int, float). Max values is ",
MAX_NUM_COLUMNS);
printf("%s%s%s%s\n", indent, indent, indent,
- "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
- printf("%s%s%s%s\n", indent, "-T", indent,
+ "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
+ printf("%s%s%s%s\n", indent, "-T, --threads=NUMBER", "\t\t",
"The number of threads. Default is 10.");
- printf("%s%s%s%s\n", indent, "-i", indent,
+ printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t",
"The sleep time (ms) between insertion. Default is 0.");
- printf("%s%s%s%s%d.\n", indent, "-S", indent,
+ printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t",
"The timestamp step between insertion. Default is ",
DEFAULT_TIMESTAMP_STEP);
- printf("%s%s%s%s\n", indent, "-r", indent,
+ printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t",
+ "The interlace rows of insertion. Default is ",
+ DEFAULT_INTERLACE_ROWS);
+ printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t",
"The number of records per request. Default is 30000.");
- printf("%s%s%s%s\n", indent, "-t", indent,
+ printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t",
"The number of tables. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-n", indent,
+ printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t",
"The number of records per table. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-M", indent,
+ printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t",
"The value of records generated are totally random.");
- printf("%s%s%s%s\n", indent, indent, indent,
- " The default is to simulate power equipment senario.");
- printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag.");
- printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt.");
- printf("%s%s%s%s\n", indent, "-O", indent,
- "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
- printf("%s%s%s%s\n", indent, "-R", indent,
+ printf("%s\n", "\t\t\t\tThe default is to simulate power equipment scenario.");
+ printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t",
+ "No-insert flag.");
+ printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt.");
+ printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t",
+ "Insert order mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
+ printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t",
"Out of order data's range, ms, default is 1000.");
- printf("%s%s%s%s\n", indent, "-g", indent,
+ printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t",
"Print debug info.");
- printf("%s%s%s\n", indent, "-V, --version\t",
- "Print version info.");
- printf("%s%s%s%s\n", indent, "--help\t", indent,
- "Print command line arguments list info.");
+ printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t",
+ "Give this help list");
+ printf("%s%s%s%s\n", indent, " --usage\t", "\t\t",
+ "Give a short usage message");
+ printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version.");
/* printf("%s%s%s%s\n", indent, "-D", indent,
"Delete database if exists. 0: no, 1: yes, default is 1");
*/
+ printf("\nMandatory or optional arguments to long options are also mandatory or optional\n\
+for any corresponding short options.\n\
+\n\
+Report bugs to <support@taosdata.com>.\n");
}
static bool isStringNumber(char *input)
@@ -808,113 +856,441 @@ static bool isStringNumber(char *input)
return true;
}
+static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value)
+{
+ fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorUnrecognized(char *program, char *wrong_arg)
+{
+ fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires an argument -- '%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg2(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires a number argument '-%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg3(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option '%s' requires an argument\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-f") == 0) {
+ if ((0 == strncmp(argv[i], "-f", strlen("-f")))
+ || (0 == strncmp(argv[i], "--file", strlen("--file")))) {
arguments->demo_mode = false;
- arguments->metaFile = argv[++i];
- } else if (strcmp(argv[i], "-c") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-c need a valid path following!\n");
+
+ if (2 == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg(argv[0], "f");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("-f"));
+ } else if (strlen("--file") == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg3(argv[0], "--file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("--file="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
- } else if (strcmp(argv[i], "-h") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-h need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-c", strlen("-c")))
+ || (0 == strncmp(argv[i], "--config-dir", strlen("--config-dir")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "c");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN);
+ } else if (strlen("--config-dir") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--config-dir");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->host = argv[++i];
- } else if (strcmp(argv[i], "-p") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-p need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-h", strlen("-h")))
+ || (0 == strncmp(argv[i], "--host", strlen("--host")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "h");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) {
+ arguments->host = (char *)(argv[i] + strlen("-h"));
+ } else if (strlen("--host") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--host");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) {
+ arguments->host = (char *)(argv[i] + strlen("--host="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->port = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-I") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-I need a valid string following!\n");
+ } else if (strcmp(argv[i], "-PP") == 0) {
+ arguments->performance_print = true;
+ } else if ((0 == strncmp(argv[i], "-P", strlen("-P")))
+ || (0 == strncmp(argv[i], "--port", strlen("--port")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "P");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "P");
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--port=")))) {
+ arguments->port = atoi((char *)(argv[i]+strlen("--port=")));
+ }
+ } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-P")))) {
+ arguments->port = atoi((char *)(argv[i]+strlen("-P")));
+ }
+ } else if (strlen("--port") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- ++i;
- if (0 == strcasecmp(argv[i], "taosc")) {
- arguments->iface = TAOSC_IFACE;
- } else if (0 == strcasecmp(argv[i], "rest")) {
- arguments->iface = REST_IFACE;
-#if STMT_IFACE_ENABLED == 1
- } else if (0 == strcasecmp(argv[i], "stmt")) {
- arguments->iface = STMT_IFACE;
-#endif
+ } else if ((0 == strncmp(argv[i], "-I", strlen("-I")))
+ || (0 == strncmp(argv[i], "--interface", strlen("--interface")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "I");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
+ } else if (0 == strncmp(argv[i], "--interface=", strlen("--interface="))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I",
+ (char *)(argv[i] + strlen("-I")));
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interface") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "--interface", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
} else {
- errorPrint("%s", "\n\t-I need a valid string following!\n");
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- } else if (strcmp(argv[i], "-u") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-u need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-u", strlen("-u")))
+ || (0 == strncmp(argv[i], "--user", strlen("--user")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "u");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) {
+ arguments->user = (char *)(argv[i++] + strlen("-u"));
+ } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) {
+ arguments->user = (char *)(argv[i++] + strlen("--user="));
+ } else if (strlen("--user") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--user");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->user = argv[++i];
- } else if (strcmp(argv[i], "-P") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-P need a valid string following!\n");
- exit(EXIT_FAILURE);
+ } else if ((0 == strncmp(argv[i], "-p", strlen("-p")))
+ || (0 == strcmp(argv[i], "--password"))) {
+ if ((strlen(argv[i]) == 2) || (0 == strcmp(argv[i], "--password"))) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", arguments->password) > 1) {
+ fprintf(stderr, "password read error!\n");
+ }
+ taosSetConsoleEcho(true);
+ } else {
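+ // -p<password>: value attached directly to the short flag.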
+ tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
}
- arguments->password = argv[++i];
- } else if (strcmp(argv[i], "-o") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-o need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-o", strlen("-o")))
+ || (0 == strncmp(argv[i], "--output", strlen("--output")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else if (0 == strncmp(argv[i], "--output=", strlen("--output="))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("--output="));
+ } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("-o"));
+ } else if (strlen("--output") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->output_file = argv[++i];
- } else if (strcmp(argv[i], "-s") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-s need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-s", strlen("-s")))
+ || (0 == strncmp(argv[i], "--sql-file", strlen("--sql-file")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "s");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file="));
+ } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("-s"));
+ } else if (strlen("--sql-file") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--sql-file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->sqlFile = argv[++i];
- } else if (strcmp(argv[i], "-q") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n");
+ } else if ((0 == strncmp(argv[i], "-q", strlen("-q")))
+ || (0 == strncmp(argv[i], "--query-mode", strlen("--query-mode")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "q");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "q");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--query-mode=", strlen("--query-mode="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--query-mode=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--query-mode=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-q")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("-q")));
+ } else {
+ errorPrintReqArg2(argv[0], "-q");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--query-mode") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->async_mode = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-T") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-T need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-T", strlen("-T")))
+ || (0 == strncmp(argv[i], "--threads", strlen("--threads")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "T");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "T");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-T")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("-T")));
+ } else {
+ errorPrintReqArg2(argv[0], "-T");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--threads") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_threads = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-i") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-i need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-i", strlen("-i")))
+ || (0 == strncmp(argv[i], "--insert-interval", strlen("--insert-interval")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "i");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "i");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--insert-interval=", strlen("--insert-interval="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--insert-interval=")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("--insert-interval=")));
+ } else {
+ errorPrintReqArg3(argv[0], "--insert-innterval");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-i")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("-i")));
+ } else {
+ errorPrintReqArg3(argv[0], "-i");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--insert-interval")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->insert_interval = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-S") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("\n\t%s%s", argv[i], " need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-S", strlen("-S")))
+ || (0 == strncmp(argv[i], "--time-step", strlen("--time-step")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "S");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "S");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--time-step=", strlen("--time-step="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--time-step=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--time-step=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-S")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("-S")));
+ } else {
+ errorPrintReqArg2(argv[0], "-S");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--time-step") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->timestamp_step = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
if ((argc == i+1)
|| (!isStringNumber(argv[i+1]))) {
@@ -923,97 +1299,308 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->query_times = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-B") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-B need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-B", strlen("-B")))
+ || (0 == strncmp(argv[i], "--interlace-rows", strlen("--interlace-rows")))) {
+ if (strlen("-B") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "B");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "B");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-B")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B")));
+ } else {
+ errorPrintReqArg2(argv[0], "-B");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interlace-rows")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->interlace_rows = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-r") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-r need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-r", strlen("-r")))
+ || (0 == strncmp(argv[i], "--rec-per-req", 13))) {
+ if (strlen("-r") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "r");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "r");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-r")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r")));
+ } else {
+ errorPrintReqArg2(argv[0], "-r");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--rec-per-req")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_RPR = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-t") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-t need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-t", strlen("-t")))
+ || (0 == strncmp(argv[i], "--tables", strlen("--tables")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "t");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "t");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("--tables=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-t")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("-t")));
+ } else {
+ errorPrintReqArg2(argv[0], "-t");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--tables") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_tables = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-n") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-n need a number following!\n");
+
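+ // Keep the global child table total in sync with the -t/--tables value.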
+ g_totalChildTables = arguments->ntables;
+ } else if ((0 == strncmp(argv[i], "-n", strlen("-n")))
+ || (0 == strncmp(argv[i], "--records", strlen("--records")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "n");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--records=")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("--records=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-n")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("-n")));
+ } else {
+ errorPrintReqArg2(argv[0], "-n");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--records") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_DPT = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-d") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-d need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-d", strlen("-d")))
+ || (0 == strncmp(argv[i], "--database", strlen("--database")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "d");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else if (0 == strncmp(argv[i], "--database=", strlen("--database="))) {
+ arguments->output_file = (char *)(argv[i] + strlen("--database="));
+ } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) {
+ arguments->output_file = (char *)(argv[i] + strlen("-d"));
+ } else if (strlen("--database") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--database");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->database = argv[++i];
- } else if (strcmp(argv[i], "-l") == 0) {
+ } else if ((0 == strncmp(argv[i], "-l", strlen("-l")))
+ || (0 == strncmp(argv[i], "--columns", strlen("--columns")))) {
arguments->demo_mode = false;
- if (argc == i+1) {
- if (!isStringNumber(argv[i+1])) {
- printHelp();
- errorPrint("%s", "\n\t-l need a number following!\n");
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "l");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "l");
+ exit(EXIT_FAILURE);
+ }
+ arguments->columnCount = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-l")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("-l")));
+ } else {
+ errorPrintReqArg2(argv[0], "-l");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--columns")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--columns");
exit(EXIT_FAILURE);
}
+ arguments->columnCount = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
}
- arguments->num_of_CPR = atoi(argv[++i]);
- if (arguments->num_of_CPR > MAX_NUM_COLUMNS) {
- printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS);
+ if (arguments->columnCount > MAX_NUM_COLUMNS) {
+ printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS);
prompt();
- arguments->num_of_CPR = MAX_NUM_COLUMNS;
+ arguments->columnCount = MAX_NUM_COLUMNS;
}
- for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) {
- arguments->datatype[col] = "INT";
+ for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) {
+ arguments->dataType[col] = "INT";
+ arguments->data_type[col] = TSDB_DATA_TYPE_INT;
}
- for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
- arguments->datatype[col] = NULL;
+ for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) {
+ arguments->dataType[col] = NULL;
+ arguments->data_type[col] = TSDB_DATA_TYPE_NULL;
}
- } else if (strcmp(argv[i], "-b") == 0) {
+ } else if ((0 == strncmp(argv[i], "-b", strlen("-b")))
+ || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) {
arguments->demo_mode = false;
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-b need valid string following!\n");
+
+ char *dataType;
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "b");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else if (0 == strncmp(argv[i], "--data-type=", strlen("--data-type="))) {
+ dataType = (char *)(argv[i] + strlen("--data-type="));
+ } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) {
+ dataType = (char *)(argv[i] + strlen("-b"));
+ } else if (strlen("--data-type") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--data-type");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- ++i;
- if (strstr(argv[i], ",") == NULL) {
+
+ if (strstr(dataType, ",") == NULL) {
// only one col
- if (strcasecmp(argv[i], "INT")
- && strcasecmp(argv[i], "FLOAT")
- && strcasecmp(argv[i], "TINYINT")
- && strcasecmp(argv[i], "BOOL")
- && strcasecmp(argv[i], "SMALLINT")
- && strcasecmp(argv[i], "BIGINT")
- && strcasecmp(argv[i], "DOUBLE")
- && strcasecmp(argv[i], "BINARY")
- && strcasecmp(argv[i], "TIMESTAMP")
- && strcasecmp(argv[i], "NCHAR")) {
+ if (strcasecmp(dataType, "INT")
+ && strcasecmp(dataType, "FLOAT")
+ && strcasecmp(dataType, "TINYINT")
+ && strcasecmp(dataType, "BOOL")
+ && strcasecmp(dataType, "SMALLINT")
+ && strcasecmp(dataType, "BIGINT")
+ && strcasecmp(dataType, "DOUBLE")
+ && strcasecmp(dataType, "BINARY")
+ && strcasecmp(dataType, "TIMESTAMP")
+ && strcasecmp(dataType, "NCHAR")) {
printHelp();
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
- arguments->datatype[0] = argv[i];
+ arguments->dataType[0] = dataType;
+ if (0 == strcasecmp(dataType, "INT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(dataType, "TINYINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strcasecmp(dataType, "SMALLINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(dataType, "BIGINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(dataType, "FLOAT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(dataType, "DOUBLE")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strcasecmp(dataType, "BINARY")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strcasecmp(dataType, "NCHAR")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(dataType, "BOOL")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(dataType, "TIMESTAMP")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[1] = NULL;
+ arguments->data_type[1] = TSDB_DATA_TYPE_NULL;
} else {
// more than one col
int index = 0;
- g_dupstr = strdup(argv[i]);
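+ // strsep() mutates its input, so tokenize a heap copy (g_dupstr).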
+ g_dupstr = strdup(dataType);
char *running = g_dupstr;
char *token = strsep(&running, ",");
while(token != NULL) {
@@ -1032,117 +1619,367 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
- arguments->datatype[index++] = token;
- token = strsep(&running, ",");
- if (index >= MAX_NUM_COLUMNS) break;
+
+ if (0 == strcasecmp(token, "INT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(token, "FLOAT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(token, "SMALLINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(token, "BIGINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(token, "DOUBLE")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strcasecmp(token, "TINYINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strcasecmp(token, "BINARY")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strcasecmp(token, "NCHAR")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(token, "BOOL")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(token, "TIMESTAMP")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[index] = token;
+ index ++;
+ token = strsep(&running, ",");
+ if (index >= MAX_NUM_COLUMNS) break;
}
- arguments->datatype[index] = NULL;
- }
- } else if (strcmp(argv[i], "-w") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-w need a number following!\n");
+ arguments->dataType[index] = NULL;
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ } else if ((0 == strncmp(argv[i], "-w", strlen("-w")))
+ || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "w");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "w");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("--binwidth=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-w")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("-w")));
+ } else {
+ errorPrintReqArg2(argv[0], "-w");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--binwidth") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->len_of_binary = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-m") == 0) {
- if ((argc == i+1) ||
- (isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
+ } else if ((0 == strncmp(argv[i], "-m", strlen("-m")))
+ || (0 == strncmp(argv[i], "--table-prefix", strlen("--table-prefix")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "m");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else if (0 == strncmp(argv[i], "--table-prefix=", strlen("--table-prefix="))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("--table-prefix="));
+ } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("-m"));
+ } else if (strlen("--table-prefix") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--table-prefix");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->tb_prefix = argv[++i];
- } else if (strcmp(argv[i], "-N") == 0) {
+ } else if ((strcmp(argv[i], "-N") == 0)
+ || (0 == strcmp(argv[i], "--normal-table"))) {
arguments->use_metric = false;
- } else if (strcmp(argv[i], "-M") == 0) {
+ } else if ((strcmp(argv[i], "-M") == 0)
+ || (0 == strcmp(argv[i], "--random"))) {
arguments->demo_mode = false;
- } else if (strcmp(argv[i], "-x") == 0) {
+ } else if ((strcmp(argv[i], "-x") == 0)
+ || (0 == strcmp(argv[i], "--no-insert"))) {
arguments->insert_only = false;
- } else if (strcmp(argv[i], "-y") == 0) {
+ } else if ((strcmp(argv[i], "-y") == 0)
+ || (0 == strcmp(argv[i], "--answer-yes"))) {
arguments->answer_yes = true;
- } else if (strcmp(argv[i], "-g") == 0) {
+ } else if ((strcmp(argv[i], "-g") == 0)
+ || (0 == strcmp(argv[i], "--debug"))) {
arguments->debug_print = true;
} else if (strcmp(argv[i], "-gg") == 0) {
arguments->verbose_print = true;
- } else if (strcmp(argv[i], "-pp") == 0) {
- arguments->performance_print = true;
- } else if (strcmp(argv[i], "-O") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-O need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-R", strlen("-R")))
+ || (0 == strncmp(argv[i], "--disorder-range",
+ strlen("--disorder-range")))) {
+ if (strlen("-R") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "R");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "R");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder-range=",
+ strlen("--disorder-range="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("--disorder-range=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-R")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("-R")));
+ } else {
+ errorPrintReqArg2(argv[0], "-R");
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->disorderRange < 0) {
+ errorPrint("Invalid disorder range %d, will be set to %d\n",
+ arguments->disorderRange, 1000);
+ arguments->disorderRange = 1000;
+ }
+ } else if (strlen("--disorder-range") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ } else if ((0 == strncmp(argv[i], "-O", strlen("-O")))
+ || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "O");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "O");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder=", strlen("--disorder="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("--disorder=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-O")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("-O")));
+ } else {
+ errorPrintReqArg2(argv[0], "-O");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--disorder") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
-
- arguments->disorderRatio = atoi(argv[++i]);
if (arguments->disorderRatio > 50) {
+ errorPrint("Invalid disorder ratio %d, will be set to %d\n",
+ arguments->disorderRatio, 50);
arguments->disorderRatio = 50;
}
if (arguments->disorderRatio < 0) {
+ errorPrint("Invalid disorder ratio %d, will be set to %d\n",
+ arguments->disorderRatio, 0);
arguments->disorderRatio = 0;
}
-
- } else if (strcmp(argv[i], "-R") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-R need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-a", strlen("-a")))
+ || (0 == strncmp(argv[i], "--replica",
+ strlen("--replica")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "a");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "a");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--replica=",
+ strlen("--replica="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("--replica=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-a")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("-a")));
+ } else {
+ errorPrintReqArg2(argv[0], "-a");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--replica") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->disorderRange = atoi(argv[++i]);
- if (arguments->disorderRange < 0)
- arguments->disorderRange = 1000;
-
- } else if (strcmp(argv[i], "-a") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-a need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->replica = atoi(argv[++i]);
if (arguments->replica > 3 || arguments->replica < 1) {
+ errorPrint("Invalid replica value %d, will be set to %d\n",
+ arguments->replica, 1);
arguments->replica = 1;
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
if (arguments->method_of_delete > 3) {
- errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
+ errorPrint("%s", "\n\t-D need a value (0~3) number following!\n");
exit(EXIT_FAILURE);
}
- } else if ((strcmp(argv[i], "--version") == 0) ||
- (strcmp(argv[i], "-V") == 0)){
+ } else if ((strcmp(argv[i], "--version") == 0)
+ || (strcmp(argv[i], "-V") == 0)) {
printVersion();
exit(0);
- } else if (strcmp(argv[i], "--help") == 0) {
+ } else if ((strcmp(argv[i], "--help") == 0)
+ || (strcmp(argv[i], "-?") == 0)) {
printHelp();
exit(0);
+ } else if (strcmp(argv[i], "--usage") == 0) {
+ printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\
+ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\
+ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\
+ [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\
+ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\
+ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-g]\n\
+ [--help] [--usage] [--version]\n");
+ exit(0);
} else {
- printHelp();
- errorPrint("%s", "ERROR: wrong options\n");
+ // simulate argp-style diagnostics for unknown arguments
+ if (strlen(argv[i]) > 2) {
+ if (0 == strncmp(argv[i], "--", 2)) {
+ fprintf(stderr, "%s: unrecognized option '%s'\n", argv[0], argv[i]);
+ } else if (0 == strncmp(argv[i], "-", 1)) {
+ char tmp[2] = {0};
+ tstrncpy(tmp, argv[i]+1, 2);
+ fprintf(stderr, "%s: invalid option -- '%s'\n", argv[0], tmp);
+ } else {
+ fprintf(stderr, "%s: Too many arguments\n", argv[0]);
+ }
+ } else {
+ fprintf(stderr, "%s: invalid option -- '%s'\n", argv[0],
+ argv[i] + 1);
+ }
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
exit(EXIT_FAILURE);
}
}
int columnCount;
for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
- if (g_args.datatype[columnCount] == NULL) {
+ if (g_args.dataType[columnCount] == NULL) {
break;
}
}
if (0 == columnCount) {
- perror("data type error!");
- exit(-1);
+ ERROR_EXIT("data type error!");
+ }
+ g_args.columnCount = columnCount;
+
+ g_args.lenOfOneRow = 20; // timestamp
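+ // Estimate the rendered width of one row: each column contributes its
+ // worst-case printed length (string types reserve 3 extra bytes,
+ // presumably for quoting and a separator).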
+ for (int c = 0; c < g_args.columnCount; c++) {
+ switch(g_args.data_type[c]) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ g_args.lenOfOneRow += g_args.binwidth + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ g_args.lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ g_args.lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ g_args.lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ g_args.lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ g_args.lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ g_args.lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ g_args.lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
}
- g_args.num_of_CPR = columnCount;
- if (((arguments->debug_print) && (arguments->metaFile == NULL))
+ if (((arguments->debug_print) && (NULL != arguments->metaFile))
|| arguments->verbose_print) {
printf("###################################################################\n");
printf("# meta file: %s\n", arguments->metaFile);
@@ -1153,11 +1990,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf("# Password: %s\n", arguments->password);
printf("# Use metric: %s\n",
arguments->use_metric ? "true" : "false");
- if (*(arguments->datatype)) {
+ if (*(arguments->dataType)) {
printf("# Specified data type: ");
- for (int i = 0; i < MAX_NUM_COLUMNS; i++)
- if (arguments->datatype[i])
- printf("%s,", arguments->datatype[i]);
+ for (int c = 0; c < MAX_NUM_COLUMNS; c++)
+ if (arguments->dataType[c])
+ printf("%s,", arguments->dataType[c]);
else
break;
printf("\n");
@@ -1165,15 +2002,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
printf("# Number of records per req: %u\n",
- arguments->num_of_RPR);
+ arguments->reqPerReq);
printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
- printf("# Length of Binary: %d\n", arguments->len_of_binary);
- printf("# Number of Threads: %d\n", arguments->num_of_threads);
+ printf("# Length of Binary: %d\n", arguments->binwidth);
+ printf("# Number of Threads: %d\n", arguments->nthreads);
printf("# Number of Tables: %"PRId64"\n",
- arguments->num_of_tables);
+ arguments->ntables);
printf("# Number of Data per Table: %"PRId64"\n",
- arguments->num_of_DPT);
+ arguments->insertRows);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
if (arguments->disorderRatio) {
@@ -1199,31 +2036,20 @@ static void tmfclose(FILE *fp) {
static void tmfree(char *buf) {
if (NULL != buf) {
free(buf);
}
}
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
- for (i = 0; i < 5 /* retry */; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
- }
+ verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
- }
- }
+ TAOS_RES *res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
- verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
if (code != 0) {
if (!quiet) {
- errorPrint("Failed to execute %s, reason: %s\n",
+ errorPrint2("Failed to execute <%s>, reason: %s\n",
command, taos_errstr(res));
}
taos_free_result(res);
@@ -1245,7 +2071,7 @@ static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{
pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
if (pThreadInfo->fp == NULL) {
- errorPrint(
+ errorPrint2(
"%s() LN%d, failed to open result file: %s, result will not save to file\n",
__func__, __LINE__, pThreadInfo->filePath);
return;
@@ -1264,7 +2090,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
- errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
+ errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
return ;
}
@@ -1304,7 +2130,7 @@ static void selectAndGetResult(
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
TAOS_RES *res = taos_query(pThreadInfo->taos, command);
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return;
@@ -1323,23 +2149,23 @@ static void selectAndGetResult(
}
} else {
- errorPrint("%s() LN%d, unknown query mode: %s\n",
+ errorPrint2("%s() LN%d, unknown query mode: %s\n",
__func__, __LINE__, g_queryInfo.queryMode);
}
}
-static char *rand_bool_str(){
+static char *rand_bool_str() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN);
}
-static int32_t rand_bool(){
+static int32_t rand_bool() {
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 2;
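+ // Reset the cursor explicitly instead of taking the modulo of an
+ // ever-growing counter, whose signed overflow would be undefined.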
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 2;
}
static char *rand_tinyint_str()
@@ -1355,8 +2181,8 @@ static int32_t rand_tinyint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 128;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 128;
}
static char *rand_smallint_str()
@@ -1372,8 +2198,8 @@ static int32_t rand_smallint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 32767;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 32767;
}
static char *rand_int_str()
@@ -1388,8 +2214,8 @@ static int32_t rand_int()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND];
}
static char *rand_bigint_str()
@@ -1405,8 +2231,8 @@ static int64_t rand_bigint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randbigint[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randbigint[cursor % MAX_PREPARED_RAND];
}
static char *rand_float_str()
@@ -1417,12 +2243,13 @@ static char *rand_float_str()
return g_randfloat_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
+
static float rand_float()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randfloat[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randfloat[cursor % MAX_PREPARED_RAND];
}
static char *demo_current_float_str()
@@ -1438,8 +2265,9 @@ static float UNUSED_FUNC demo_current_float()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000);
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10)
+ + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000);
}
static char *demo_voltage_int_str()
@@ -1455,8 +2283,8 @@ static int32_t UNUSED_FUNC demo_voltage_int()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return 215 + g_randint[cursor] % 10;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10;
}
static char *demo_phase_float_str() {
@@ -1466,11 +2294,12 @@ static char *demo_phase_float_str() {
return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
-static float UNUSED_FUNC demo_phase_float(){
+static float UNUSED_FUNC demo_phase_float() {
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360);
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10
+ + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360);
}
#if 0
@@ -1544,7 +2373,7 @@ static void init_rand_data() {
g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND);
assert(g_randdouble_buff);
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
+ for (int i = 0; i < MAX_PREPARED_RAND; i++) {
g_randint[i] = (int)(taosRandom() % 65535);
sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d",
g_randint[i]);
@@ -1621,11 +2450,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n",
- g_Dbs.threadCountByCreateTbl);
+ g_Dbs.threadCountForCreateTbl);
printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
printf("number of records per req: \033[33m%u\033[0m\n",
- g_args.num_of_RPR);
+ g_args.reqPerReq);
printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
@@ -1636,9 +2465,9 @@ static int printfInsertMeta() {
printf(" database[%d] name: \033[33m%s\033[0m\n",
i, g_Dbs.db[i].dbName);
if (0 == g_Dbs.db[i].drop) {
- printf(" drop: \033[33mno\033[0m\n");
+ printf(" drop: \033[33m no\033[0m\n");
} else {
- printf(" drop: \033[33myes\033[0m\n");
+ printf(" drop: \033[33m yes\033[0m\n");
}
if (g_Dbs.db[i].dbCfg.blocks > 0) {
@@ -1690,9 +2519,7 @@ static int printfInsertMeta() {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
-#if NANO_SECOND_ENABLED == 1
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))
-#endif
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) {
printf(" precision: \033[33m%s\033[0m\n",
g_Dbs.db[i].dbCfg.precision);
@@ -1709,7 +2536,7 @@ static int printfInsertMeta() {
printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
@@ -1749,9 +2576,9 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
+ printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n");
}else {
- printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
+ printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n");
}
*/
printf(" interlaceRows: \033[33m%u\033[0m\n",
@@ -1831,8 +2658,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "configDir: %s\n", configDir);
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
- fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR);
+ fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl);
+ fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq);
fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
@@ -1883,9 +2710,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
-#if NANO_SECOND_ENABLED == 1
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))
-#endif
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
fprintf(fp, " precision: %s\n",
g_Dbs.db[i].dbCfg.precision);
@@ -1901,7 +2726,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " super table[%d]:\n", j);
fprintf(fp, " stbName: %s\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
fprintf(fp, " autoCreateTable: %s\n", "no");
@@ -2060,7 +2885,7 @@ static void printfQueryMeta() {
printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
+ g_queryInfo.superQueryInfo.stbName);
printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
@@ -2088,10 +2913,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
-#if NANO_SECOND_ENABLED == 1
-    } if (precision == TSDB_TIME_PRECISION_NANO) {
+    } else if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
-#endif
} else {
tt = (time_t)(val / 1000);
}
@@ -2113,10 +2936,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
-#if NANO_SECOND_ENABLED == 1
} else if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
-#endif
} else {
sprintf(buf + pos, ".%03d", (int)(val % 1000));
}
@@ -2135,36 +2956,45 @@ static void xDumpFieldToFile(FILE* fp, const char* val,
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
- fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
+ fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 1 : 0));
break;
+
case TSDB_DATA_TYPE_TINYINT:
fprintf(fp, "%d", *((int8_t *)val));
break;
+
case TSDB_DATA_TYPE_SMALLINT:
fprintf(fp, "%d", *((int16_t *)val));
break;
+
case TSDB_DATA_TYPE_INT:
fprintf(fp, "%d", *((int32_t *)val));
break;
+
case TSDB_DATA_TYPE_BIGINT:
- fprintf(fp, "%" PRId64, *((int64_t *)val));
+ fprintf(fp, "%"PRId64"", *((int64_t *)val));
break;
+
case TSDB_DATA_TYPE_FLOAT:
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
break;
+
case TSDB_DATA_TYPE_DOUBLE:
fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
break;
+
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
memcpy(buf, val, length);
buf[length] = 0;
fprintf(fp, "\'%s\'", buf);
break;
+
case TSDB_DATA_TYPE_TIMESTAMP:
formatTimestamp(buf, *(int64_t*)val, precision);
fprintf(fp, "'%s'", buf);
break;
+
default:
break;
}
@@ -2178,7 +3008,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
FILE* fp = fopen(fname, "at");
if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file: %s\n",
+ errorPrint2("%s() LN%d, failed to open file: %s\n",
__func__, __LINE__, fname);
return -1;
}
@@ -2225,7 +3055,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
int32_t code = taos_errno(res);
if (code != 0) {
- errorPrint( "failed to run , reason: %s\n",
+ errorPrint2("failed to run , reason: %s\n",
taos_errstr(res));
return -1;
}
@@ -2241,7 +3071,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
if (dbInfos[count] == NULL) {
- errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count);
+ errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count);
return -1;
}
@@ -2324,15 +3154,15 @@ static void printfDbInfoForQueryToFile(
}
static void printfQuerySystemInfo(TAOS * taos) {
- char filename[BUFFER_SIZE+1] = {0};
- char buffer[BUFFER_SIZE+1] = {0};
+ char filename[MAX_FILE_NAME_LEN] = {0};
+ char buffer[1024] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
- snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d",
+ snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d",
lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min,
lt->tm_sec);
@@ -2364,12 +3194,12 @@ static void printfQuerySystemInfo(TAOS * taos) {
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
- snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name);
+ snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
- snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name);
+ snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
@@ -2394,7 +3224,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
request_buf = malloc(req_buf_len);
if (NULL == request_buf) {
- errorPrint("%s", "ERROR, cannot allocate memory.\n");
+ errorPrint("%s", "cannot allocate memory.\n");
exit(EXIT_FAILURE);
}
@@ -2430,14 +3260,14 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
#endif
debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd);
free(request_buf);
- ERROR_EXIT("ERROR opening socket");
+ ERROR_EXIT("opening socket");
}
int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr));
debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn);
if (retConn < 0) {
free(request_buf);
- ERROR_EXIT("ERROR connecting");
+ ERROR_EXIT("connecting");
}
memset(base64_buf, 0, INPUT_BUF_LEN);
@@ -2470,7 +3300,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
auth, strlen(sqlstr), sqlstr);
if (r >= req_buf_len) {
free(request_buf);
- ERROR_EXIT("ERROR too long request");
+ ERROR_EXIT("too long request");
}
verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf);
@@ -2483,7 +3313,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
bytes = write(sockfd, request_buf + sent, req_str_len - sent);
#endif
if (bytes < 0)
- ERROR_EXIT("ERROR writing message to socket");
+ ERROR_EXIT("writing message to socket");
if (bytes == 0)
break;
sent+=bytes;
@@ -2500,7 +3330,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
#endif
if (bytes < 0) {
free(request_buf);
- ERROR_EXIT("ERROR reading response from socket");
+ ERROR_EXIT("reading response from socket");
}
if (bytes == 0)
break;
@@ -2509,7 +3339,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
if (received == resp_len) {
free(request_buf);
- ERROR_EXIT("ERROR storing complete response from socket");
+ ERROR_EXIT("storing complete response from socket");
}
response_buf[RESP_BUF_LEN - 1] = '\0';
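
The send and receive loops above tolerate partial transfers, which POSIX sockets may return at any time. The same write-all pattern in isolation, using plain write(); the Windows and TLS branches behind the #ifdefs are omitted:

    #include <unistd.h>
    #include <sys/types.h>

    /* Returns bytes written, or -1 on error; the caller frees its buffers. */
    static ssize_t writeAll(int fd, const char *buf, size_t len) {
        size_t sent = 0;
        while (sent < len) {
            ssize_t n = write(fd, buf + sent, len - sent);
            if (n < 0) return -1;
            if (n == 0) break;      /* peer closed the connection */
            sent += (size_t)n;
        }
        return (ssize_t)sent;
    }
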
@@ -2533,7 +3363,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
if (NULL == dataBuf) {
- errorPrint("%s() LN%d, calloc failed! size:%d\n",
+ errorPrint2("%s() LN%d, calloc failed! size:%d\n",
__func__, __LINE__, TSDB_MAX_SQL_LEN+1);
return NULL;
}
@@ -2598,7 +3428,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
if ((g_args.demo_mode) && (i == 0)) {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64",", tableSeq % 10);
+ "%"PRId64",", (tableSeq % 10) + 1);
} else {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
@@ -2633,7 +3463,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
"%"PRId64",", rand_bigint());
} else {
- errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType);
+ errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType);
tmfree(dataBuf);
return NULL;
}
@@ -2651,29 +3481,50 @@ static int calcRowLen(SSuperTable* superTbls) {
for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
char* dataType = superTbls->columns[colIndex].dataType;
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfOneRow += INT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfOneRow += BIGINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfOneRow += SMALLINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfOneRow += TINYINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfOneRow += BOOL_BUFF_LEN;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfOneRow += FLOAT_BUFF_LEN;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfOneRow += DOUBLE_BUFF_LEN;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- lenOfOneRow += TIMESTAMP_BUFF_LEN;
- } else {
- printf("get error data type : %s\n", dataType);
- exit(-1);
+ switch(superTbls->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", dataType);
+ exit(EXIT_FAILURE);
}
}
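
The switch above replaces a chain of strcasecmp() calls with a dispatch on the new numeric data_type field. The same mapping can also be table-driven; a sketch meant to live in taosdemo.c itself, since it assumes the TSDB_DATA_TYPE_* enums and the *_BUFF_LEN constants defined earlier in that file (typeWidth is a hypothetical helper, not part of the patch):

    #include <stddef.h>

    static int typeWidth(int data_type, int dataLen) {
        static const struct { int type; int width; } kWidths[] = {
            { TSDB_DATA_TYPE_INT,       INT_BUFF_LEN       },
            { TSDB_DATA_TYPE_BIGINT,    BIGINT_BUFF_LEN    },
            { TSDB_DATA_TYPE_SMALLINT,  SMALLINT_BUFF_LEN  },
            { TSDB_DATA_TYPE_TINYINT,   TINYINT_BUFF_LEN   },
            { TSDB_DATA_TYPE_BOOL,      BOOL_BUFF_LEN      },
            { TSDB_DATA_TYPE_FLOAT,     FLOAT_BUFF_LEN     },
            { TSDB_DATA_TYPE_DOUBLE,    DOUBLE_BUFF_LEN    },
            { TSDB_DATA_TYPE_TIMESTAMP, TIMESTAMP_BUFF_LEN },
        };
        if (data_type == TSDB_DATA_TYPE_BINARY || data_type == TSDB_DATA_TYPE_NCHAR)
            return dataLen + 3;     /* payload plus quotes and separator */
        for (size_t i = 0; i < sizeof(kWidths) / sizeof(kWidths[0]); i++)
            if (kWidths[i].type == data_type) return kWidths[i].width;
        return -1;                  /* unknown type: caller should abort */
    }
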
@@ -2703,8 +3554,8 @@ static int calcRowLen(SSuperTable* superTbls) {
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
} else {
- printf("get error tag type : %s\n", dataType);
- exit(-1);
+ errorPrint2("get error tag type : %s\n", dataType);
+ exit(EXIT_FAILURE);
}
}
@@ -2713,12 +3564,11 @@ static int calcRowLen(SSuperTable* superTbls) {
return 0;
}
-
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
- char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
+ char* dbName, char* stbName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
- char command[BUFFER_SIZE] = "\0";
+ char command[1024] = "\0";
char limitBuf[100] = "\0";
TAOS_RES * res;
@@ -2732,17 +3582,17 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
//get all child table name use cmd: select tbname from superTblName;
- snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s",
- dbName, sTblName, limitBuf);
+ snprintf(command, 1024, "select tbname from %s.%s %s",
+ dbName, stbName, limitBuf);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
if (code != 0) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to run command %s\n",
+ errorPrint2("%s() LN%d, failed to run command %s\n",
__func__, __LINE__, command);
- exit(-1);
+ exit(EXIT_FAILURE);
}
int64_t childTblCount = (limit < 0)?10000:limit;
@@ -2752,8 +3602,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
if (NULL == childTblName) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
- exit(-1);
+ errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
}
@@ -2762,9 +3612,9 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
int32_t* len = taos_fetch_lengths(res);
if (0 == strlen((char *)row[0])) {
- errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n",
__func__, __LINE__, count);
- exit(-1);
+ exit(EXIT_FAILURE);
}
tstrncpy(pTblName, (char *)row[0], len[0]+1);
@@ -2780,12 +3630,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
(size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
} else {
// exit, if allocate more memory failed
- errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
- __func__, __LINE__, dbName, sTblName);
tmfree(childTblName);
taos_free_result(res);
taos_close(taos);
- exit(-1);
+ errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+ __func__, __LINE__, dbName, stbName);
+ exit(EXIT_FAILURE);
}
}
pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
@@ -2799,10 +3649,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
- char* sTblName, char** childTblNameOfSuperTbl,
+ char* stbName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl) {
- return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
+ return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
-1, 0);
}
@@ -2810,13 +3660,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
static int getSuperTableFromServer(TAOS * taos, char* dbName,
SSuperTable* superTbls) {
- char command[BUFFER_SIZE] = "\0";
+ char command[1024] = "\0";
TAOS_RES * res;
TAOS_ROW row = NULL;
int count = 0;
//get schema use cmd: describe superTblName;
- snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName);
+ snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -2842,6 +3692,39 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(DATATYPE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
superTbls->tags[tagIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->tags[tagIndex].note,
@@ -2853,16 +3736,51 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
tstrncpy(superTbls->columns[columnIndex].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+
tstrncpy(superTbls->columns[columnIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(DATATYPE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
superTbls->columns[columnIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->columns[columnIndex].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(NOTE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+
columnIndex++;
}
count++;
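
The strncasecmp chains above map the textual dataType returned by `describe` onto the new numeric data_type field, once for tags and once for columns, and the same chain recurs in the JSON parser further down. A hypothetical shared helper that mirrors the prefix comparison; the TSDB_DATA_TYPE_* values come from the TDengine client header:

    #include <string.h>
    #include <strings.h>
    #include <taos.h>       /* TSDB_DATA_TYPE_* enums; path per TDengine install */

    static int dataTypeFromString(const char *s) {
        static const struct { const char *name; int type; } kMap[] = {
            { "TIMESTAMP", TSDB_DATA_TYPE_TIMESTAMP },
            { "SMALLINT",  TSDB_DATA_TYPE_SMALLINT  },
            { "TINYINT",   TSDB_DATA_TYPE_TINYINT   },
            { "BIGINT",    TSDB_DATA_TYPE_BIGINT    },
            { "DOUBLE",    TSDB_DATA_TYPE_DOUBLE    },
            { "BINARY",    TSDB_DATA_TYPE_BINARY    },
            { "FLOAT",     TSDB_DATA_TYPE_FLOAT     },
            { "NCHAR",     TSDB_DATA_TYPE_NCHAR     },
            { "BOOL",      TSDB_DATA_TYPE_BOOL      },
            { "INT",       TSDB_DATA_TYPE_INT       },
        };
        for (size_t i = 0; i < sizeof(kMap) / sizeof(kMap[0]); i++)
            if (0 == strncasecmp(s, kMap[i].name, strlen(kMap[i].name)))
                return kMap[i].type;
        return TSDB_DATA_TYPE_NULL;
    }
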
@@ -2880,11 +3798,11 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
int childTblCount = 10000;
superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (superTbls->childTblName == NULL) {
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
return -1;
}
getAllChildNameOfSuperTable(taos, dbName,
- superTbls->sTblName,
+ superTbls->stbName,
&superTbls->childTblName,
&superTbls->childTblCount);
}
@@ -2896,80 +3814,102 @@ static int createSuperTable(
TAOS * taos, char* dbName,
SSuperTable* superTbl) {
- char command[BUFFER_SIZE] = "\0";
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
char cols[COL_BUFFER_LEN] = "\0";
- int colIndex;
int len = 0;
int lenOfOneRow = 0;
if (superTbl->columnCount == 0) {
- errorPrint("%s() LN%d, super table column count is %d\n",
+ errorPrint2("%s() LN%d, super table column count is %d\n",
__func__, __LINE__, superTbl->columnCount);
+ free(command);
return -1;
}
- for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
- char* dataType = superTbl->columns[colIndex].dataType;
+ for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ",C%d %s(%d)", colIndex, "BINARY",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ",C%d %s(%d)", colIndex, "NCHAR",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- if ((g_args.demo_mode) && (colIndex == 1)) {
+ switch(superTbl->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ", VOLTAGE INT");
- } else {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
- }
- lenOfOneRow += INT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "BIGINT");
- lenOfOneRow += BIGINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "SMALLINT");
- lenOfOneRow += SMALLINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
- lenOfOneRow += TINYINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
- lenOfOneRow += BOOL_BUFF_LEN;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- if (g_args.demo_mode) {
- if (colIndex == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
- } else if (colIndex == 2) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
+ ",C%d %s(%d)", colIndex, "BINARY",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ",C%d %s(%d)", colIndex, "NCHAR",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (colIndex == 1)) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ", VOLTAGE INT");
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
}
- } else {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
- }
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
- lenOfOneRow += FLOAT_BUFF_LEN;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "DOUBLE");
- lenOfOneRow += DOUBLE_BUFF_LEN;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "TIMESTAMP");
- lenOfOneRow += TIMESTAMP_BUFF_LEN;
- } else {
- taos_close(taos);
- errorPrint("%s() LN%d, config error data type : %s\n",
- __func__, __LINE__, dataType);
- exit(-1);
+ case TSDB_DATA_TYPE_BIGINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "BIGINT");
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "SMALLINT");
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (colIndex == 0) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
+ } else if (colIndex == 2) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
+ }
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
+ }
+
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "DOUBLE");
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "TIMESTAMP");
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ taos_close(taos);
+ free(command);
+ errorPrint2("%s() LN%d, config error data type : %s\n",
+ __func__, __LINE__, superTbl->columns[colIndex].dataType);
+ exit(EXIT_FAILURE);
}
}
@@ -2978,10 +3918,11 @@ static int createSuperTable(
// save for creating child table
superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1);
if (NULL == superTbl->colsOfCreateChildTable) {
- errorPrint("%s() LN%d, Failed when calloc, size:%d",
- __func__, __LINE__, len+1);
taos_close(taos);
- exit(-1);
+ free(command);
+ errorPrint2("%s() LN%d, Failed when calloc, size:%d",
+ __func__, __LINE__, len+1);
+ exit(EXIT_FAILURE);
}
snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols);
@@ -2989,8 +3930,9 @@ static int createSuperTable(
__func__, __LINE__, superTbl->colsOfCreateChildTable);
if (superTbl->tagCount == 0) {
- errorPrint("%s() LN%d, super table tag count is %d\n",
+ errorPrint2("%s() LN%d, super table tag count is %d\n",
__func__, __LINE__, superTbl->tagCount);
+ free(command);
return -1;
}
@@ -3054,9 +3996,10 @@ static int createSuperTable(
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
} else {
taos_close(taos);
- errorPrint("%s() LN%d, config error tag type : %s\n",
+ free(command);
+ errorPrint2("%s() LN%d, config error tag type : %s\n",
__func__, __LINE__, dataType);
- exit(-1);
+ exit(EXIT_FAILURE);
}
}
@@ -3066,26 +4009,28 @@ static int createSuperTable(
superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
snprintf(command, BUFFER_SIZE,
- "create table if not exists %s.%s (ts timestamp%s) tags %s",
- dbName, superTbl->sTblName, cols, tags);
+ "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s",
+ dbName, superTbl->stbName, cols, tags);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
- errorPrint( "create supertable %s failed!\n\n",
- superTbl->sTblName);
+ errorPrint2("create supertable %s failed!\n\n",
+ superTbl->stbName);
+ free(command);
return -1;
}
- debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
+
+ debugPrint("create supertable %s success!\n\n", superTbl->stbName);
+ free(command);
return 0;
}
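
createSuperTable now heap-allocates its command buffer, which is why every early return and exit path above has to free it first. A common way to keep such paths from multiplying is a single cleanup label; a sketch under the same buffer-size assumption (BUFFER_SIZE is the project's own constant, the fallback value here is a placeholder):

    #include <stdlib.h>

    #ifndef BUFFER_SIZE
    #define BUFFER_SIZE 65536   /* placeholder; taosdemo.c defines its own */
    #endif

    static int buildAndRunCommand(void) {
        int ret = -1;
        char *command = calloc(1, BUFFER_SIZE);
        if (command == NULL) goto cleanup;
        /* ... assemble and execute the statement, then set ret = 0 on success ... */
        ret = 0;
    cleanup:
        free(command);      /* free(NULL) is a no-op, so this is always safe */
        return ret;
    }
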
-static int createDatabasesAndStables() {
+int createDatabasesAndStables(char *command) {
TAOS * taos = NULL;
int ret = 0;
taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
return -1;
}
- char command[BUFFER_SIZE] = "\0";
for (int i = 0; i < g_Dbs.dbCount; i++) {
if (g_Dbs.db[i].drop) {
@@ -3097,35 +4042,43 @@ static int createDatabasesAndStables() {
int dataLen = 0;
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName);
+ BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s",
+ g_Dbs.db[i].dbName);
if (g_Dbs.db[i].dbCfg.blocks > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks);
+ BUFFER_SIZE - dataLen, " BLOCKS %d",
+ g_Dbs.db[i].dbCfg.blocks);
}
if (g_Dbs.db[i].dbCfg.cache > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache);
+ BUFFER_SIZE - dataLen, " CACHE %d",
+ g_Dbs.db[i].dbCfg.cache);
}
if (g_Dbs.db[i].dbCfg.days > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days);
+ BUFFER_SIZE - dataLen, " DAYS %d",
+ g_Dbs.db[i].dbCfg.days);
}
if (g_Dbs.db[i].dbCfg.keep > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep);
+ BUFFER_SIZE - dataLen, " KEEP %d",
+ g_Dbs.db[i].dbCfg.keep);
}
if (g_Dbs.db[i].dbCfg.quorum > 1) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum);
+ BUFFER_SIZE - dataLen, " QUORUM %d",
+ g_Dbs.db[i].dbCfg.quorum);
}
if (g_Dbs.db[i].dbCfg.replica > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica);
+ BUFFER_SIZE - dataLen, " REPLICA %d",
+ g_Dbs.db[i].dbCfg.replica);
}
if (g_Dbs.db[i].dbCfg.update > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update);
+ BUFFER_SIZE - dataLen, " UPDATE %d",
+ g_Dbs.db[i].dbCfg.update);
}
//if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
// dataLen += snprintf(command + dataLen,
@@ -3133,42 +4086,46 @@ static int createDatabasesAndStables() {
//}
if (g_Dbs.db[i].dbCfg.minRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows);
+ BUFFER_SIZE - dataLen, " MINROWS %d",
+ g_Dbs.db[i].dbCfg.minRows);
}
if (g_Dbs.db[i].dbCfg.maxRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows);
+ BUFFER_SIZE - dataLen, " MAXROWS %d",
+ g_Dbs.db[i].dbCfg.maxRows);
}
if (g_Dbs.db[i].dbCfg.comp > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp);
+ BUFFER_SIZE - dataLen, " COMP %d",
+ g_Dbs.db[i].dbCfg.comp);
}
if (g_Dbs.db[i].dbCfg.walLevel > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel);
+ BUFFER_SIZE - dataLen, " wal %d",
+ g_Dbs.db[i].dbCfg.walLevel);
}
if (g_Dbs.db[i].dbCfg.cacheLast > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast);
+ BUFFER_SIZE - dataLen, " CACHELAST %d",
+ g_Dbs.db[i].dbCfg.cacheLast);
}
if (g_Dbs.db[i].dbCfg.fsync > 0) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
- " fsync %d", g_Dbs.db[i].dbCfg.fsync);
+ " FSYNC %d", g_Dbs.db[i].dbCfg.fsync);
}
- if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms")))
-#if NANO_SECOND_ENABLED == 1
+ if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
- "ns", strlen("ns")))
-#endif
+ "ns", 2))
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
- "us", strlen("us")))) {
+ "us", 2))) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
" precision \'%s\';", g_Dbs.db[i].dbCfg.precision);
}
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos);
- errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName);
+ errorPrint("\ncreate database %s failed!\n\n",
+ g_Dbs.db[i].dbName);
return -1;
}
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
@@ -3181,7 +4138,7 @@ static int createDatabasesAndStables() {
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
if ((ret != 0) || (g_Dbs.db[i].drop)) {
@@ -3197,8 +4154,8 @@ static int createDatabasesAndStables() {
ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
&g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
- errorPrint("\nget super table %s.%s info failed!\n\n",
- g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
+ errorPrint2("\nget super table %s.%s info failed!\n\n",
+ g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName);
continue;
}
@@ -3215,7 +4172,7 @@ static int createDatabasesAndStables() {
static void* createTable(void *sarg)
{
threadInfo *pThreadInfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
setThreadName("createTable");
@@ -3225,8 +4182,8 @@ static void* createTable(void *sarg)
pThreadInfo->buffer = calloc(buff_len, 1);
if (pThreadInfo->buffer == NULL) {
- errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
- exit(-1);
+ errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
int len = 0;
@@ -3240,60 +4197,69 @@ static void* createTable(void *sarg)
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(pThreadInfo->buffer, buff_len,
- "create table if not exists %s.%s%"PRIu64" %s;",
+ "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
+ batchNum ++;
} else {
- if (superTblInfo == NULL) {
- errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
- __func__, __LINE__);
+ if (stbInfo == NULL) {
free(pThreadInfo->buffer);
- exit(-1);
+ errorPrint2("%s() LN%d, use metric, but super table info is NULL\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
} else {
if (0 == len) {
batchNum = 0;
memset(pThreadInfo->buffer, 0, buff_len);
len += snprintf(pThreadInfo->buffer + len,
- buff_len - len, "create table ");
+ buff_len - len, "CREATE TABLE ");
}
+
char* tagsValBuf = NULL;
- if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagValuesForStb(superTblInfo, i);
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, i);
} else {
+ if (0 == stbInfo->tagSampleCount) {
+ free(pThreadInfo->buffer);
+ ERROR_EXIT("use sample file for tag, but has no content!\n");
+ }
tagsValBuf = getTagValueFromTagSample(
- superTblInfo,
- i % superTblInfo->tagSampleCount);
+ stbInfo,
+ i % stbInfo->tagSampleCount);
}
+
if (NULL == tagsValBuf) {
free(pThreadInfo->buffer);
- return NULL;
+ ERROR_EXIT("use metric, but tag buffer is NULL\n");
}
len += snprintf(pThreadInfo->buffer + len,
buff_len - len,
"if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
- pThreadInfo->db_name, superTblInfo->childTblPrefix,
+ pThreadInfo->db_name, stbInfo->childTblPrefix,
i, pThreadInfo->db_name,
- superTblInfo->sTblName, tagsValBuf);
+ stbInfo->stbName, tagsValBuf);
free(tagsValBuf);
batchNum++;
- if ((batchNum < superTblInfo->batchCreateTableNum)
+ if ((batchNum < stbInfo->batchCreateTableNum)
&& ((buff_len - len)
- >= (superTblInfo->lenOfTagOfOneRow + 256))) {
+ >= (stbInfo->lenOfTagOfOneRow + 256))) {
continue;
}
}
}
len = 0;
+
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
- NO_INSERT_TYPE, false)){
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ NO_INSERT_TYPE, false)) {
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
free(pThreadInfo->buffer);
return NULL;
}
+ pThreadInfo->tables_created += batchNum;
- uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
@@ -3304,7 +4270,7 @@ static void* createTable(void *sarg)
if (0 != len) {
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
NO_INSERT_TYPE, false)) {
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
}
}
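
On the metric path the loop above appends one clause per child table and flushes once per batch, so a single round trip creates many tables. The buffer ends up holding a statement of this shape (database, table, and stable names plus tag values are illustrative):

    CREATE TABLE if not exists test.d0 using test.meters tags (1) if not exists test.d1 using test.meters tags (2)
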
@@ -3314,14 +4280,13 @@ static void* createTable(void *sarg)
static int startMultiThreadCreateChildTable(
char* cols, int threads, uint64_t tableFrom, int64_t ntables,
- char* db_name, SSuperTable* superTblInfo) {
+ char* db_name, SSuperTable* stbInfo) {
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
- printf("malloc failed\n");
- exit(-1);
+ ERROR_EXIT("createChildTable malloc failed\n");
}
if (threads < 1) {
@@ -3341,7 +4306,7 @@ static int startMultiThreadCreateChildTable(
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
- pThreadInfo->superTblInfo = superTblInfo;
+ pThreadInfo->stbInfo = stbInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
pThreadInfo->taos = taos_connect(
g_Dbs.host,
@@ -3350,7 +4315,7 @@ static int startMultiThreadCreateChildTable(
db_name,
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
- errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
__func__, __LINE__, taos_errstr(NULL));
free(pids);
free(infos);
@@ -3364,6 +4329,7 @@ static int startMultiThreadCreateChildTable(
pThreadInfo->use_metric = true;
pThreadInfo->cols = cols;
pThreadInfo->minDelay = UINT64_MAX;
+ pThreadInfo->tables_created = 0;
pthread_create(pids + i, NULL, createTable, pThreadInfo);
}
@@ -3374,6 +4340,8 @@ static int startMultiThreadCreateChildTable(
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
taos_close(pThreadInfo->taos);
+
+ g_actualChildTables += pThreadInfo->tables_created;
}
free(pids);
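
The new tables_created counter is summed here only after pthread_join, so the global g_actualChildTables is touched single-threaded and needs no lock. The fan-out/join shape in isolation (fixed-size arrays for brevity; the real code allocates per the thread count):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct { int threadID; long tablesCreated; } Worker;

    static void *createTables(void *arg) {
        Worker *w = (Worker *)arg;
        w->tablesCreated = 100;     /* stand-in for the create-table loop */
        return NULL;
    }

    int main(void) {
        enum { THREADS = 4 };
        pthread_t pids[THREADS];
        Worker infos[THREADS];
        long total = 0;             /* plays the role of g_actualChildTables */
        for (int i = 0; i < THREADS; i++) {
            infos[i].threadID = i;
            pthread_create(&pids[i], NULL, createTables, &infos[i]);
        }
        for (int i = 0; i < THREADS; i++) {
            pthread_join(pids[i], NULL);
            total += infos[i].tablesCreated;   /* safe: worker has exited */
        }
        printf("created %ld tables\n", total);
        return 0;
    }
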
@@ -3400,14 +4368,13 @@ static void createChildTables() {
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
uint64_t startFrom = 0;
- g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
__func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable(
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
- g_Dbs.threadCountByCreateTbl,
+ g_Dbs.threadCountForCreateTbl,
startFrom,
g_Dbs.db[i].superTbls[j].childTblCount,
g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
@@ -3416,15 +4383,15 @@ static void createChildTables() {
} else {
// normal table
len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP");
- for (int j = 0; j < g_args.num_of_CPR; j++) {
- if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
- || (strncasecmp(g_args.datatype[j],
+ for (int j = 0; j < g_args.columnCount; j++) {
+ if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0)
+ || (strncasecmp(g_args.dataType[j],
"NCHAR", strlen("NCHAR")) == 0)) {
snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
- ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
+ ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth);
} else {
snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
- ",C%d %s", j, g_args.datatype[j]);
+ ",C%d %s", j, g_args.dataType[j]);
}
len = strlen(tblColsBuf);
}
@@ -3433,12 +4400,12 @@ static void createChildTables() {
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
__func__, __LINE__,
- g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
+ g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf);
startMultiThreadCreateChildTable(
tblColsBuf,
- g_Dbs.threadCountByCreateTbl,
+ g_Dbs.threadCountForCreateTbl,
0,
- g_args.num_of_tables,
+ g_args.ntables,
g_Dbs.db[i].dbName,
NULL);
}
@@ -3448,26 +4415,26 @@ static void createChildTables() {
/*
Read at most 10000 lines; if the file holds more, reading resumes after the buffered lines are consumed.
*/
-static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
+static int readTagFromCsvFileToMem(SSuperTable * stbInfo) {
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
- FILE *fp = fopen(superTblInfo->tagsFile, "r");
+ FILE *fp = fopen(stbInfo->tagsFile, "r");
if (fp == NULL) {
printf("Failed to open tags file: %s, reason:%s\n",
- superTblInfo->tagsFile, strerror(errno));
+ stbInfo->tagsFile, strerror(errno));
return -1;
}
- if (superTblInfo->tagDataBuf) {
- free(superTblInfo->tagDataBuf);
- superTblInfo->tagDataBuf = NULL;
+ if (stbInfo->tagDataBuf) {
+ free(stbInfo->tagDataBuf);
+ stbInfo->tagDataBuf = NULL;
}
int tagCount = 10000;
int count = 0;
- char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount);
+ char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount);
if (tagDataBuf == NULL) {
printf("Failed to calloc, reason:%s\n", strerror(errno));
fclose(fp);
@@ -3483,20 +4450,20 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
continue;
}
- memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen);
+ memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen);
count++;
if (count >= tagCount - 1) {
char *tmp = realloc(tagDataBuf,
- (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow);
+ (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow);
if (tmp != NULL) {
tagDataBuf = tmp;
tagCount = (int)(tagCount*1.5);
- memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow,
- 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow));
+ memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow,
+ 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow));
} else {
// exit, if allocate more memory failed
- printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile);
+ printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile);
tmfree(tagDataBuf);
free(line);
fclose(fp);
@@ -3505,8 +4472,8 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
}
}
- superTblInfo->tagDataBuf = tagDataBuf;
- superTblInfo->tagSampleCount = count;
+ stbInfo->tagDataBuf = tagDataBuf;
+ stbInfo->tagSampleCount = count;
free(line);
fclose(fp);
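
Both CSV readers grow their row buffer by 1.5x when it fills, keeping the realloc result in a temporary so the old buffer can still be freed on failure. The same pattern as a standalone sketch using POSIX getline (tgetline in the source is assumed to behave the same way):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    static char *readRows(FILE *fp, size_t rowLen, int *rows) {
        int cap = 16, count = 0;
        char *buf = calloc((size_t)cap, rowLen);
        char *line = NULL;
        size_t n = 0;
        ssize_t len;
        while (buf && (len = getline(&line, &n, fp)) != -1) {
            if ((size_t)len >= rowLen) continue;    /* discard oversized rows */
            if (count == cap) {
                int newCap = cap + cap / 2;         /* grow by 1.5x */
                char *tmp = realloc(buf, (size_t)newCap * rowLen);
                if (tmp == NULL) { free(buf); buf = NULL; break; }
                memset(tmp + (size_t)cap * rowLen, 0,
                       (size_t)(newCap - cap) * rowLen);
                buf = tmp;
                cap = newCap;
            }
            memcpy(buf + (size_t)count * rowLen, line, (size_t)len);
            count++;
        }
        free(line);
        *rows = count;
        return buf;     /* NULL on allocation failure */
    }
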
@@ -3516,29 +4483,29 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) {
/*
Read at most 10000 lines; if the file holds more, reading resumes after the buffered lines are consumed.
*/
-static int readSampleFromCsvFileToMem(
- SSuperTable* superTblInfo) {
+static int generateSampleFromCsvForStb(
+ SSuperTable* stbInfo) {
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int getRows = 0;
- FILE* fp = fopen(superTblInfo->sampleFile, "r");
+ FILE* fp = fopen(stbInfo->sampleFile, "r");
if (fp == NULL) {
- errorPrint( "Failed to open sample file: %s, reason:%s\n",
- superTblInfo->sampleFile, strerror(errno));
+ errorPrint("Failed to open sample file: %s, reason:%s\n",
+ stbInfo->sampleFile, strerror(errno));
return -1;
}
- assert(superTblInfo->sampleDataBuf);
- memset(superTblInfo->sampleDataBuf, 0,
- MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow);
+ assert(stbInfo->sampleDataBuf);
+ memset(stbInfo->sampleDataBuf, 0,
+ MAX_SAMPLES * stbInfo->lenOfOneRow);
while(1) {
readLen = tgetline(&line, &n, fp);
if (-1 == readLen) {
if(0 != fseek(fp, 0, SEEK_SET)) {
- errorPrint( "Failed to fseek file: %s, reason:%s\n",
- superTblInfo->sampleFile, strerror(errno));
+ errorPrint("Failed to fseek file: %s, reason:%s\n",
+ stbInfo->sampleFile, strerror(errno));
fclose(fp);
return -1;
}
@@ -3553,17 +4520,17 @@ static int readSampleFromCsvFileToMem(
continue;
}
- if (readLen > superTblInfo->lenOfOneRow) {
+ if (readLen > stbInfo->lenOfOneRow) {
printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
- (int32_t)readLen, superTblInfo->lenOfOneRow);
+ (int32_t)readLen, stbInfo->lenOfOneRow);
continue;
}
- memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow,
+ memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow,
line, readLen);
getRows++;
- if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
+ if (getRows == MAX_SAMPLES) {
break;
}
}
@@ -3580,7 +4547,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// columns
cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
if (columns && columns->type != cJSON_Array) {
- printf("ERROR: failed to read json, columns not found\n");
+ errorPrint("%s", "failed to read json, columns not found\n");
goto PARSE_OVER;
} else if (NULL == columns) {
superTbls->columnCount = 0;
@@ -3590,8 +4557,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
int columnSize = cJSON_GetArraySize(columns);
if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("failed to read json, column size overflow, max column size is %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
@@ -3609,8 +4576,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column count not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3621,8 +4587,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(column, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d: failed to read json, column type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column type not found\n");
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
@@ -3644,33 +4609,69 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
tstrncpy(superTbls->columns[index].dataType,
columnCase.dataType,
min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
}
if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_NUM_COLUMNS);
+ errorPrint("failed to read json, column size overflow, allowed max column size is %d\n",
+ MAX_NUM_COLUMNS);
goto PARSE_OVER;
}
superTbls->columnCount = index;
+ for (int c = 0; c < superTbls->columnCount; c++) {
+ if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
count = 1;
index = 0;
// tags
cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
if (!tags || tags->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, tags not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tags not found\n");
goto PARSE_OVER;
}
int tagSize = cJSON_GetArraySize(tags);
if (tagSize > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, max tag size is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
@@ -3684,7 +4685,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- printf("ERROR: failed to read json, column count not found\n");
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3695,8 +4696,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(tag, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, tag type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tag type not found\n");
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring,
@@ -3706,8 +4706,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (dataLen && dataLen->type == cJSON_Number) {
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column len not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column len not found\n");
goto PARSE_OVER;
} else {
columnCase.dataLen = 0;
@@ -3722,16 +4721,52 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
if (index > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
superTbls->tagCount = index;
+ for (int t = 0; t < superTbls->tagCount; t++) {
+ if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("columns + tags is more than allowed max columns count: %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
ret = true;
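
Every key in this parser follows the same three-way pattern: present with the right cJSON type, absent (apply a default), or present with the wrong type (abort). The pattern extracted into one helper; the `type` and `valueint` fields match the classic cJSON API this file already uses, while the header path is an assumption:

    #include <cJSON.h>      /* path varies by install; the source bundles its own copy */

    /* Returns 0 on success, -1 if the key exists but is not a number. */
    static int readIntOrDefault(cJSON *root, const char *key, int def, int *out) {
        cJSON *item = cJSON_GetObjectItem(root, key);
        if (item && item->type == cJSON_Number) {
            *out = (int)item->valueint;     /* present and well-typed */
            return 0;
        } else if (!item) {
            *out = def;                     /* absent: fall back to default */
            return 0;
        }
        return -1;                          /* present but mistyped */
    }
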
@@ -3754,7 +4789,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -3774,9 +4809,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
- tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
} else if (!password) {
- tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN);
}
cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
@@ -3792,51 +4827,46 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!threads) {
g_Dbs.threadCount = 1;
} else {
- printf("ERROR: failed to read json, threads not found\n");
+ errorPrint("%s", "failed to read json, threads not found\n");
goto PARSE_OVER;
}
cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
if (threads2 && threads2->type == cJSON_Number) {
- g_Dbs.threadCountByCreateTbl = threads2->valueint;
+ g_Dbs.threadCountForCreateTbl = threads2->valueint;
} else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = 1;
+ g_Dbs.threadCountForCreateTbl = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads2 not found\n");
goto PARSE_OVER;
}
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
if (gInsertInterval->valueint <0) {
- errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert interval input mistake\n");
goto PARSE_OVER;
}
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
goto PARSE_OVER;
}
- g_args.interlace_rows = interlaceRows->valueint;
+ g_args.interlaceRows = interlaceRows->valueint;
} else if (!interlaceRows) {
- g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+ g_args.interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode; the max value must not exceed num_of_records_per_req
} else {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
goto PARSE_OVER;
}
@@ -3870,9 +4900,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
prompt();
numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
}
- g_args.num_of_RPR = numRecPerReq->valueint;
+ g_args.reqPerReq = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = MAX_RECORDS_PER_REQ;
+ g_args.reqPerReq = MAX_RECORDS_PER_REQ;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
@@ -3898,25 +4928,25 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
// rows per table need be less than insert batch
- if (g_args.interlace_rows > g_args.num_of_RPR) {
+ if (g_args.interlaceRows > g_args.reqPerReq) {
printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n",
- g_args.interlace_rows, g_args.num_of_RPR);
+ g_args.interlaceRows, g_args.reqPerReq);
printf(" interlace rows value will be set to num_of_records_per_req %u\n\n",
- g_args.num_of_RPR);
+ g_args.reqPerReq);
prompt();
- g_args.interlace_rows = g_args.num_of_RPR;
+ g_args.interlaceRows = g_args.reqPerReq;
}
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (!dbs || dbs->type != cJSON_Array) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
int dbSize = cJSON_GetArraySize(dbs);
if (dbSize > MAX_DB_COUNT) {
errorPrint(
- "ERROR: failed to read json, databases size overflow, max database is %d\n",
+ "failed to read json, databases size overflow, max database is %d\n",
MAX_DB_COUNT);
goto PARSE_OVER;
}
@@ -3929,13 +4959,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
// dbinfo
cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
if (!dbinfo || dbinfo->type != cJSON_Object) {
- printf("ERROR: failed to read json, dbinfo not found\n");
+ errorPrint("%s", "failed to read json, dbinfo not found\n");
goto PARSE_OVER;
}
cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
- printf("ERROR: failed to read json, db name not found\n");
+ errorPrint("%s", "failed to read json, db name not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
@@ -3950,8 +4980,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!drop) {
g_Dbs.db[i].drop = g_args.drop_database;
} else {
- errorPrint("%s() LN%d, failed to read json, drop input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, drop input mistake\n");
goto PARSE_OVER;
}
@@ -3963,7 +4992,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!precision) {
memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, precision not found\n");
+ errorPrint("%s", "failed to read json, precision not found\n");
goto PARSE_OVER;
}
@@ -3973,7 +5002,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!update) {
g_Dbs.db[i].dbCfg.update = -1;
} else {
- printf("ERROR: failed to read json, update not found\n");
+ errorPrint("%s", "failed to read json, update not found\n");
goto PARSE_OVER;
}
@@ -3983,7 +5012,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!replica) {
g_Dbs.db[i].dbCfg.replica = -1;
} else {
- printf("ERROR: failed to read json, replica not found\n");
+ errorPrint("%s", "failed to read json, replica not found\n");
goto PARSE_OVER;
}
@@ -3993,7 +5022,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!keep) {
g_Dbs.db[i].dbCfg.keep = -1;
} else {
- printf("ERROR: failed to read json, keep not found\n");
+ errorPrint("%s", "failed to read json, keep not found\n");
goto PARSE_OVER;
}
@@ -4003,7 +5032,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!days) {
g_Dbs.db[i].dbCfg.days = -1;
} else {
- printf("ERROR: failed to read json, days not found\n");
+ errorPrint("%s", "failed to read json, days not found\n");
goto PARSE_OVER;
}
@@ -4013,7 +5042,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cache) {
g_Dbs.db[i].dbCfg.cache = -1;
} else {
- printf("ERROR: failed to read json, cache not found\n");
+ errorPrint("%s", "failed to read json, cache not found\n");
goto PARSE_OVER;
}
@@ -4023,7 +5052,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!blocks) {
g_Dbs.db[i].dbCfg.blocks = -1;
} else {
- printf("ERROR: failed to read json, block not found\n");
+ errorPrint("%s", "failed to read json, block not found\n");
goto PARSE_OVER;
}
@@ -4043,7 +5072,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!minRows) {
g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, minRows not found\n");
+ errorPrint("%s", "failed to read json, minRows not found\n");
goto PARSE_OVER;
}
@@ -4053,7 +5082,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxRows) {
g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, maxRows not found\n");
+ errorPrint("%s", "failed to read json, maxRows not found\n");
goto PARSE_OVER;
}
@@ -4063,7 +5092,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!comp) {
g_Dbs.db[i].dbCfg.comp = -1;
} else {
- printf("ERROR: failed to read json, comp not found\n");
+ errorPrint("%s", "failed to read json, comp not found\n");
goto PARSE_OVER;
}
@@ -4073,7 +5102,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!walLevel) {
g_Dbs.db[i].dbCfg.walLevel = -1;
} else {
- printf("ERROR: failed to read json, walLevel not found\n");
+ errorPrint("%s", "failed to read json, walLevel not found\n");
goto PARSE_OVER;
}
@@ -4083,7 +5112,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cacheLast) {
g_Dbs.db[i].dbCfg.cacheLast = -1;
} else {
- printf("ERROR: failed to read json, cacheLast not found\n");
+ errorPrint("%s", "failed to read json, cacheLast not found\n");
goto PARSE_OVER;
}
@@ -4103,24 +5132,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!fsync) {
g_Dbs.db[i].dbCfg.fsync = -1;
} else {
- errorPrint("%s() LN%d, failed to read json, fsync input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, fsync input mistake\n");
goto PARSE_OVER;
}
- // super_talbes
+ // super_tables
cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
if (!stables || stables->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super_tables not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super_tables not found\n");
goto PARSE_OVER;
}
int stbSize = cJSON_GetArraySize(stables);
if (stbSize > MAX_SUPER_TABLE_COUNT) {
errorPrint(
- "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
- __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
+ "failed to read json, supertable size overflow, max supertable is %d\n",
+ MAX_SUPER_TABLE_COUNT);
goto PARSE_OVER;
}
@@ -4133,16 +5160,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
if (!stbName || stbName->type != cJSON_String
|| stbName->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, stb name not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stb name not found\n");
goto PARSE_OVER;
}
- tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
+ tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring,
TSDB_TABLE_NAME_LEN);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
- printf("ERROR: failed to read json, childtable_prefix not found\n");
+ errorPrint("%s", "failed to read json, childtable_prefix not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
@@ -4163,7 +5189,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!autoCreateTbl) {
g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
} else {
- printf("ERROR: failed to read json, auto_create_table not found\n");
+ errorPrint("%s", "failed to read json, auto_create_table not found\n");
goto PARSE_OVER;
}
@@ -4171,9 +5197,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
} else if (!batchCreateTbl) {
- g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000;
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10;
} else {
- printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
+ errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
@@ -4193,8 +5219,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!childTblExists) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
- errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, child_table_exists not found\n");
goto PARSE_OVER;
}
@@ -4204,11 +5230,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, childtable_count input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
+ g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
if (dataSource && dataSource->type == cJSON_String
@@ -4220,8 +5247,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
min(SMALL_BUFF_LEN, strlen("rand") + 1));
} else {
- errorPrint("%s() LN%d, failed to read json, data_source not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, data_source not found\n");
goto PARSE_OVER;
}
@@ -4232,13 +5258,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE;
} else if (0 == strcasecmp(stbIface->valuestring, "rest")) {
g_Dbs.db[i].superTbls[j].iface= REST_IFACE;
-#if STMT_IFACE_ENABLED == 1
} else if (0 == strcasecmp(stbIface->valuestring, "stmt")) {
g_Dbs.db[i].superTbls[j].iface= STMT_IFACE;
-#endif
} else {
- errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n",
- __func__, __LINE__, stbIface->valuestring);
+ errorPrint("failed to read json, insert_mode %s not recognized\n",
+ stbIface->valuestring);
goto PARSE_OVER;
}
} else if (!stbIface) {
@@ -4252,7 +5276,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_limit->type != cJSON_Number) {
- printf("ERROR: failed to read json, childtable_limit\n");
+ errorPrint("%s", "failed to read json, childtable_limit\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
@@ -4265,7 +5289,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if ((childTbl_offset->type != cJSON_Number)
|| (0 > childTbl_offset->valueint)) {
- printf("ERROR: failed to read json, childtable_offset\n");
+ errorPrint("%s", "failed to read json, childtable_offset\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint;
@@ -4281,7 +5305,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
"now", TSDB_DB_NAME_LEN);
} else {
- printf("ERROR: failed to read json, start_timestamp not found\n");
+ errorPrint("%s", "failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
}
@@ -4291,7 +5315,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!timestampStep) {
g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
} else {
- printf("ERROR: failed to read json, timestamp_step not found\n");
+ errorPrint("%s", "failed to read json, timestamp_step not found\n");
goto PARSE_OVER;
}
@@ -4306,7 +5330,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, sample_format not found\n");
+ errorPrint("%s", "failed to read json, sample_format not found\n");
goto PARSE_OVER;
}
@@ -4321,7 +5345,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, sample_file not found\n");
+ errorPrint("%s", "failed to read json, sample_file not found\n");
goto PARSE_OVER;
}
@@ -4339,7 +5363,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
g_Dbs.db[i].superTbls[j].tagSource = 0;
} else {
- printf("ERROR: failed to read json, tags_file not found\n");
+ errorPrint("%s", "failed to read json, tags_file not found\n");
goto PARSE_OVER;
}
@@ -4355,8 +5379,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxSqlLen) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
- errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n");
goto PARSE_OVER;
}
/*
@@ -4373,31 +5396,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!multiThreadWriteOneTbl) {
g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
} else {
- printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n");
+ errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n");
goto PARSE_OVER;
}
*/
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
if (insertRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
if (stbInterlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
@@ -4415,8 +5435,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else {
errorPrint(
- "%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ "%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
@@ -4432,7 +5451,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRatio) {
g_Dbs.db[i].superTbls[j].disorderRatio = 0;
} else {
- printf("ERROR: failed to read json, disorderRatio not found\n");
+ errorPrint("%s", "failed to read json, disorderRatio not found\n");
goto PARSE_OVER;
}
@@ -4442,7 +5461,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRange) {
g_Dbs.db[i].superTbls[j].disorderRange = 1000;
} else {
- printf("ERROR: failed to read json, disorderRange not found\n");
+ errorPrint("%s", "failed to read json, disorderRange not found\n");
goto PARSE_OVER;
}
@@ -4450,17 +5469,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
if (insertInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
+ verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
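Every scalar option in the insert JSON is parsed with the same three-way pattern seen above: present with the expected type, absent (fall back to a default), or present with the wrong type (abort the parse). A sketch of that pattern against the cJSON API, with a hypothetical `readOptionalInt64` helper name:

```c
#include <stdint.h>
#include "cJSON.h"  /* the cJSON bundled with taosdemo is assumed here */

/* Three-way option lookup: numeric -> use it, missing -> default,
   wrong type -> -1 so the caller can goto PARSE_OVER. */
static int readOptionalInt64(cJSON *obj, const char *key,
                             int64_t defaultVal, int64_t *out) {
    cJSON *item = cJSON_GetObjectItem(obj, key);
    if (item && item->type == cJSON_Number) {
        *out = item->valueint;
        return 0;
    }
    if (!item) {
        *out = defaultVal;
        return 0;
    }
    return -1;
}
```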
@@ -4492,7 +5509,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -4512,9 +5529,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
- tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE);
+ tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
} else if (!password) {
- tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);;
+ tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);;
}
cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
@@ -4530,23 +5547,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!answerPrompt) {
g_args.answer_yes = false;
} else {
- printf("ERROR: failed to read json, confirm_parameter_prompt not found\n");
+ errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n");
goto PARSE_OVER;
}
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
if (gQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s()", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
@@ -4554,7 +5569,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
@@ -4568,7 +5583,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.queryMode, "taosc",
min(SMALL_BUFF_LEN, strlen("taosc") + 1));
} else {
- printf("ERROR: failed to read json, query_mode not found\n");
+ errorPrint("%s", "failed to read json, query_mode not found\n");
goto PARSE_OVER;
}
@@ -4578,7 +5593,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, super_table_query not found\n");
+ errorPrint("%s", "failed to read json, super_table_query not found\n");
goto PARSE_OVER;
} else {
cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval");
@@ -4593,8 +5608,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
if (specifiedQueryTimes->valueint <= 0) {
errorPrint(
- "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, specifiedQueryTimes->valueint);
+ "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ specifiedQueryTimes->valueint);
goto PARSE_OVER;
}
@@ -4611,8 +5626,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (concurrent && concurrent->type == cJSON_Number) {
if (concurrent->valueint <= 0) {
errorPrint(
- "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n",
- __func__, __LINE__,
+ "query sqlCount %d or concurrent %d is not correct.\n",
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
@@ -4630,8 +5644,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4654,7 +5667,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", restart->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4670,7 +5683,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", keepProgress->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe keepProgress error\n");
+ errorPrint("%s", "failed to read json, subscribe keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4682,15 +5695,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!specifiedSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(specifiedSqls);
if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
> MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
- __func__, __LINE__,
+ errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
superSqlSize,
g_queryInfo.specifiedQueryInfo.concurrent,
MAX_QUERY_SQL_COUNT);
@@ -4704,7 +5715,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("ERROR: failed to read json, sql not found\n");
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
@@ -4744,7 +5755,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
memset(g_queryInfo.specifiedQueryInfo.result[j],
0, MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, super query result file not found\n");
+ errorPrint("%s",
+ "failed to read json, super query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4757,7 +5769,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, sub_table_query not found\n");
+ errorPrint("%s", "failed to read json, sub_table_query not found\n");
ret = true;
goto PARSE_OVER;
} else {
@@ -4771,24 +5783,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
if (superQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, superQueryTimes->valueint);
+ errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ superQueryTimes->valueint);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
if (threads->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads input mistake\n");
goto PARSE_OVER;
}
@@ -4807,11 +5817,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname");
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
- tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
+ tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring,
TSDB_TABLE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, super table name input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super table name input error\n");
goto PARSE_OVER;
}
@@ -4823,8 +5832,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", superAsyncMode->valuestring)) {
g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4834,8 +5842,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
if (superInterval && superInterval->type == cJSON_Number) {
if (superInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interval input mistake\n");
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
@@ -4853,7 +5860,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4869,7 +5876,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", superkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
+ errorPrint("%s",
+ "failed to read json, subscribe super table keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4906,14 +5914,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!superSqls) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
- __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+ errorPrint("failed to read json, query sql size overflow, max is %d\n",
+ MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4925,8 +5932,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String
|| sqlStr->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, sql not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
@@ -4934,14 +5940,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
- && result->valuestring != NULL){
+ && result->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.result[j],
result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, sub query result file not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sub query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4959,7 +5964,7 @@ static bool getInfoFromJsonFile(char* file) {
FILE *fp = fopen(file, "r");
if (!fp) {
- printf("failed to read %s, reason:%s\n", file, strerror(errno));
+ errorPrint("failed to read %s, reason:%s\n", file, strerror(errno));
return false;
}
@@ -4970,14 +5975,14 @@ static bool getInfoFromJsonFile(char* file) {
if (len <= 0) {
free(content);
fclose(fp);
- printf("failed to read %s, content is null", file);
+ errorPrint("failed to read %s, content is null", file);
return false;
}
content[len] = 0;
cJSON* root = cJSON_Parse(content);
if (root == NULL) {
- printf("ERROR: failed to cjson parse %s, invalid json format\n", file);
+ errorPrint("failed to cjson parse %s, invalid json format\n", file);
goto PARSE_OVER;
}
@@ -4990,13 +5995,13 @@ static bool getInfoFromJsonFile(char* file) {
} else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
g_args.test_mode = SUBSCRIBE_TEST;
} else {
- printf("ERROR: failed to read json, filetype not support\n");
+ errorPrint("%s", "failed to read json, filetype not support\n");
goto PARSE_OVER;
}
} else if (!filetype) {
g_args.test_mode = INSERT_TEST;
} else {
- printf("ERROR: failed to read json, filetype not found\n");
+ errorPrint("%s", "failed to read json, filetype not found\n");
goto PARSE_OVER;
}
@@ -5006,8 +6011,8 @@ static bool getInfoFromJsonFile(char* file) {
|| (SUBSCRIBE_TEST == g_args.test_mode)) {
ret = getMetaFromQueryJsonFile(root);
} else {
- errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "input json file type error! please input correct file type: insert or query or subscribe\n");
goto PARSE_OVER;
}
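`getInfoFromJsonFile()` slurps the whole config file, NUL-terminates it, hands it to `cJSON_Parse()`, and only then dispatches on `filetype`. A condensed sketch of the load-then-parse step (the `maxLen` cap is an assumption; taosdemo sizes the buffer from its own constant):

```c
#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"

/* Read a JSON file of at most maxLen bytes and parse it; returns NULL
   on I/O failure, an empty file, or invalid JSON. */
static cJSON *loadJsonFile(const char *path, size_t maxLen) {
    FILE *fp = fopen(path, "r");
    if (!fp) {
        return NULL;
    }
    char *content = calloc(1, maxLen + 1);
    size_t len = content ? fread(content, 1, maxLen, fp) : 0;
    fclose(fp);
    if (len == 0) {
        free(content);
        return NULL;
    }
    content[len] = '\0';                 /* cJSON expects a C string */
    cJSON *root = cJSON_Parse(content);
    free(content);
    return root;                         /* caller must cJSON_Delete() */
}
```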
@@ -5034,22 +6039,37 @@ static int prepareSampleData() {
static void postFreeResource() {
tmfclose(g_fpOfInsertResult);
+
for (int i = 0; i < g_Dbs.dbCount; i++) {
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) {
- free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL;
}
if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) {
- free(g_Dbs.db[i].superTbls[j].sampleDataBuf);
+ tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf);
g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL;
}
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int c = 0;
+ c < g_Dbs.db[i].superTbls[j].columnCount; c ++) {
+
+ if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) {
+
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_Dbs.db[i].superTbls[j].sampleBindBatchArray
+ + sizeof(char*) * c)));
+ }
+ }
+ tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray);
+#endif
if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) {
- free(g_Dbs.db[i].superTbls[j].tagDataBuf);
+ tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf);
g_Dbs.db[i].superTbls[j].tagDataBuf = NULL;
}
if (0 != g_Dbs.db[i].superTbls[j].childTblName) {
- free(g_Dbs.db[i].superTbls[j].childTblName);
+ tmfree(g_Dbs.db[i].superTbls[j].childTblName);
g_Dbs.db[i].superTbls[j].childTblName = NULL;
}
}
@@ -5064,21 +6084,27 @@ static void postFreeResource() {
tmfree(g_randfloat_buff);
tmfree(g_rand_current_buff);
tmfree(g_rand_phase_buff);
- tmfree(g_randdouble_buff);
+
+ tmfree(g_sampleDataBuf);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int l = 0;
+ l < g_args.columnCount; l ++) {
+ if (g_sampleBindBatchArray) {
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_sampleBindBatchArray
+ + sizeof(char*) * l)));
+ }
+ }
+ tmfree(g_sampleBindBatchArray);
+#endif
}
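`postFreeResource()` now releases every buffer through `tmfree()` rather than bare `free()`. A plausible shape for such a wrapper (taosdemo defines its own earlier in the file; this is only a sketch of the idea):

```c
#include <stdlib.h>

/* NULL-tolerant free: free(NULL) is already legal C, but a single
   wrapper keeps call sites uniform and easy to audit. */
static void tmfree(void *buf) {
    if (buf != NULL) {
        free(buf);
    }
}
```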
static int getRowDataFromSample(
char* dataBuf, int64_t maxLen, int64_t timestamp,
- SSuperTable* superTblInfo, int64_t* sampleUsePos)
+ SSuperTable* stbInfo, int64_t* sampleUsePos)
{
- if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
- /* int ret = readSampleFromCsvFileToMem(superTblInfo);
- if (0 != ret) {
- tmfree(superTblInfo->sampleDataBuf);
- superTblInfo->sampleDataBuf = NULL;
- return -1;
- }
- */
+ if ((*sampleUsePos) == MAX_SAMPLES) {
*sampleUsePos = 0;
}
@@ -5088,8 +6114,8 @@ static int getRowDataFromSample(
"(%" PRId64 ", ", timestamp);
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen,
"%s",
- superTblInfo->sampleDataBuf
- + superTblInfo->lenOfOneRow * (*sampleUsePos));
+ stbInfo->sampleDataBuf
+ + stbInfo->lenOfOneRow * (*sampleUsePos));
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
(*sampleUsePos)++;
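With the CSV-reload path removed, `getRowDataFromSample()` treats the pre-generated sample buffer as a ring: the cursor wraps at `MAX_SAMPLES` and rows are reused from the top. A sketch of that cursor over the flat buffer (both size constants are stand-ins):

```c
#include <stdint.h>

#define MAX_SAMPLES     4      /* stand-in for taosdemo's MAX_SAMPLES */
#define LEN_OF_ONE_ROW  64     /* stand-in for stbInfo->lenOfOneRow  */

/* Return the next serialized row, wrapping instead of re-reading the
   sample source. */
static const char *nextSampleRow(const char *sampleDataBuf, int64_t *pos) {
    if (*pos == MAX_SAMPLES) {
        *pos = 0;
    }
    return sampleDataBuf + (int64_t)LEN_OF_ONE_ROW * (*pos)++;
}
```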
@@ -5109,15 +6135,16 @@ static int64_t generateStbRowData(
int tmpLen;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "(%" PRId64 ",", timestamp);
+ "(%" PRId64 "", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType,
- "BINARY", 6))
- || (0 == strncasecmp(stbInfo->columns[i].dataType,
- "NCHAR", 5))) {
+ tstrncpy(pstr + dataLen, ",", 2);
+ dataLen += 1;
+
+ if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY)
+ || (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary or nchar length overflow, max size:%u\n",
+ errorPrint2("binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5129,84 +6156,95 @@ static int64_t generateStbRowData(
}
char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
if (NULL == buf) {
- errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+ errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf);
tmfree(buf);
} else {
- char *tmp;
+ char *tmp = NULL;
+ switch(stbInfo->columns[i].data_type) {
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (i == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
+ break;
- if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "INT", 3)) {
- if ((g_args.demo_mode) && (i == 1)) {
- tmp = demo_voltage_int_str();
- } else {
- tmp = rand_int_str();
- }
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BIGINT", 6)) {
- tmp = rand_bigint_str();
- tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN);
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "FLOAT", 5)) {
- if (g_args.demo_mode) {
- if (i == 0) {
- tmp = demo_current_float_str();
+ case TSDB_DATA_TYPE_BIGINT:
+ tmp = rand_bigint_str();
+ tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (i == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
} else {
- tmp = demo_phase_float_str();
+ tmp = rand_float_str();
}
- } else {
- tmp = rand_float_str();
- }
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "DOUBLE", 6)) {
- tmp = rand_double_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "SMALLINT", 8)) {
- tmp = rand_smallint_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp,
- min(tmpLen + 1, SMALLINT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TINYINT", 7)) {
- tmp = rand_tinyint_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BOOL", 4)) {
- tmp = rand_bool_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TIMESTAMP", 9)) {
- tmp = rand_bigint_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN));
- } else {
- errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType);
- return -1;
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmp = rand_double_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ tmp = rand_smallint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp,
+ min(tmpLen + 1, SMALLINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ tmp = rand_tinyint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmp = rand_bool_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmp = rand_bigint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("Not support data type: %s\n",
+ stbInfo->columns[i].dataType);
+ exit(EXIT_FAILURE);
}
- dataLen += strlen(tmp);
- tstrncpy(pstr + dataLen, ",", 2);
- dataLen += 1;
+ if (tmp) {
+ dataLen += strlen(tmp);
+ }
}
if (dataLen > (remainderBufLen - (128)))
return 0;
}
- dataLen -= 1;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
+ tstrncpy(pstr + dataLen, ")", 2);
verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen);
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
@@ -5214,58 +6252,87 @@ static int64_t generateStbRowData(
return strlen(recBuf);
}
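`generateStbRowData()` switches from appending each value followed by a comma (and trimming the trailing comma before the `)`) to writing the comma before each value. A sketch of that separator-first style under the same `snprintf` discipline:

```c
#include <stdio.h>

/* Separator-first serialization: "," is emitted before each value, so
   the closing ")" never has a trailing comma to trim. */
static int buildRow(char *buf, int cap, long long ts,
                    const int *vals, int n) {
    int len = snprintf(buf, cap, "(%lld", ts);
    for (int i = 0; i < n && len < cap; i++) {
        len += snprintf(buf + len, cap - len, ",%d", vals[i]);
    }
    if (len < cap) {
        len += snprintf(buf + len, cap - len, ")");
    }
    return len;
}
```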
-static int64_t generateData(char *recBuf, char **data_type,
+static int64_t generateData(char *recBuf, char *data_type,
int64_t timestamp, int lenOfBinary) {
memset(recBuf, 0, MAX_DATA_SIZE);
char *pstr = recBuf;
- pstr += sprintf(pstr, "(%" PRId64, timestamp);
+ pstr += sprintf(pstr, "(%"PRId64"", timestamp);
- int columnCount = g_args.num_of_CPR;
+ int columnCount = g_args.columnCount;
+ bool b;
+ char *s;
for (int i = 0; i < columnCount; i++) {
- if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_smallint());
- } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_int());
- } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) {
- pstr += sprintf(pstr, ",%10.4f", rand_float());
- } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) {
- double t = rand_double();
- pstr += sprintf(pstr, ",%20.8f", t);
- } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) {
- bool b = rand_bool() & 1;
- pstr += sprintf(pstr, ",%s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) {
- char *s = malloc(lenOfBinary + 1);
- if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
- __func__, __LINE__, lenOfBinary + 1);
- exit(-1);
- }
- rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ",\"%s\"", s);
- free(s);
- } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) {
- char *s = malloc(lenOfBinary + 1);
- if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
- __func__, __LINE__, lenOfBinary + 1);
- exit(-1);
- }
- rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ",\"%s\"", s);
- free(s);
+ switch (data_type[i]) {
+ case TSDB_DATA_TYPE_TINYINT:
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ pstr += sprintf(pstr, ",%d", rand_int());
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pstr += sprintf(pstr, ",%20.8f", rand_double());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ b = rand_bool() & 1;
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %d\n",
+ __func__, __LINE__,
+ data_type[i]);
+ exit(EXIT_FAILURE);
}
if (strlen(recBuf) > MAX_DATA_SIZE) {
- perror("column length too long, abort");
- exit(-1);
+ ERROR_EXIT("column length too long, abort");
}
}
@@ -5276,27 +6343,166 @@ static int64_t generateData(char *recBuf, char **data_type,
return (int32_t)strlen(recBuf);
}
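`generateData()` now receives a byte array of `TSDB_DATA_TYPE_*` codes instead of an array of type-name strings, turning the `strcasecmp()` chain into a plain `switch`. A hypothetical mapper from a config string to such a code; the numeric values mirror TDengine's `taosdef.h` but should be treated as an assumption here:

```c
#include <strings.h>

enum { T_BOOL = 1, T_TINYINT, T_SMALLINT, T_INT, T_BIGINT,
       T_FLOAT, T_DOUBLE, T_BINARY, T_TIMESTAMP, T_NCHAR };

/* Map a JSON/CLI type name to a one-byte code; -1 for unknown types
   so the caller can report the name and exit. */
static int typeCodeOf(const char *name) {
    if (0 == strcasecmp(name, "INT"))       return T_INT;
    if (0 == strcasecmp(name, "BIGINT"))    return T_BIGINT;
    if (0 == strcasecmp(name, "FLOAT"))     return T_FLOAT;
    if (0 == strcasecmp(name, "DOUBLE"))    return T_DOUBLE;
    if (0 == strcasecmp(name, "BINARY"))    return T_BINARY;
    if (0 == strcasecmp(name, "NCHAR"))     return T_NCHAR;
    if (0 == strcasecmp(name, "BOOL"))      return T_BOOL;
    if (0 == strcasecmp(name, "TIMESTAMP")) return T_TIMESTAMP;
    if (0 == strcasecmp(name, "SMALLINT"))  return T_SMALLINT;
    if (0 == strcasecmp(name, "TINYINT"))   return T_TINYINT;
    return -1;
}
```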
-static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
- char* sampleDataBuf = NULL;
+static int generateSampleFromRand(
+ char *sampleDataBuf,
+ uint64_t lenOfOneRow,
+ int columnCount,
+ StrColumn *columns
+ )
+{
+ char data[MAX_DATA_SIZE];
+ memset(data, 0, MAX_DATA_SIZE);
+
+ char *buff = malloc(lenOfOneRow);
+ if (NULL == buff) {
+ errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n",
+ __func__, __LINE__, lenOfOneRow);
+ exit(EXIT_FAILURE);
+ }
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ uint64_t pos = 0;
+ memset(buff, 0, lenOfOneRow);
+
+ for (int c = 0; c < columnCount; c++) {
+ char *tmp = NULL;
+
+ uint32_t dataLen;
+ char data_type = (columns)?(columns[c].data_type):g_args.data_type[c];
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (c == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (c == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
+ } else {
+ tmp = rand_float_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pos += sprintf(buff + pos, "%s,", rand_double_str());
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pos += sprintf(buff + pos, "%s,", rand_smallint_str());
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ pos += sprintf(buff + pos, "%s,", rand_tinyint_str());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ pos += sprintf(buff + pos, "%s,", rand_bool_str());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %s\n",
+ __func__, __LINE__,
+ (columns)?(columns[c].dataType):g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ *(buff + pos - 1) = 0;
+ memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos);
+ }
+
+ free(buff);
+ return 0;
+}
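`generateSampleFromRand()` serializes each row as a comma-joined value list, overwrites the trailing comma with a NUL, and copies the row into the flat buffer at a fixed stride. A reduced sketch of that store step:

```c
#include <stdio.h>
#include <string.h>

/* Join two values with commas, drop the trailing comma, and place the
   row at its fixed-stride slot in the flat sample buffer. */
static void storeSampleRow(char *sampleBuf, int row, size_t stride,
                           const char *v1, const char *v2) {
    char line[256];
    size_t pos = 0;
    pos += snprintf(line + pos, sizeof(line) - pos, "%s,", v1);
    pos += snprintf(line + pos, sizeof(line) - pos, "%s,", v2);
    line[pos - 1] = '\0';              /* strip the trailing comma */
    memcpy(sampleBuf + (size_t)row * stride, line, pos);
}
```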
+
+static int generateSampleFromRandForNtb()
+{
+ return generateSampleFromRand(
+ g_sampleDataBuf,
+ g_args.lenOfOneRow,
+ g_args.columnCount,
+ NULL);
+}
+
+static int generateSampleFromRandForStb(SSuperTable *stbInfo)
+{
+ return generateSampleFromRand(
+ stbInfo->sampleDataBuf,
+ stbInfo->lenOfOneRow,
+ stbInfo->columnCount,
+ stbInfo->columns);
+}
+
+static int prepareSampleForNtb() {
+ g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == g_sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__,
+ g_args.lenOfOneRow * MAX_SAMPLES,
+ strerror(errno));
+ return -1;
+ }
+
+ return generateSampleFromRandForNtb();
+}
+
+static int prepareSampleForStb(SSuperTable *stbInfo) {
- sampleDataBuf = calloc(
- superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
- if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ stbInfo->sampleDataBuf = calloc(
+ stbInfo->lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == stbInfo->sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
- superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
+ stbInfo->lenOfOneRow * MAX_SAMPLES,
strerror(errno));
return -1;
}
- superTblInfo->sampleDataBuf = sampleDataBuf;
- int ret = readSampleFromCsvFileToMem(superTblInfo);
+ int ret;
+ if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) {
+ ret = generateSampleFromCsvForStb(stbInfo);
+ } else {
+ ret = generateSampleFromRandForStb(stbInfo);
+ }
if (0 != ret) {
- errorPrint("%s() LN%d, read sample from csv file failed.\n",
+ errorPrint2("%s() LN%d, read sample from csv file failed.\n",
__func__, __LINE__);
- tmfree(sampleDataBuf);
- superTblInfo->sampleDataBuf = NULL;
+ tmfree(stbInfo->sampleDataBuf);
+ stbInfo->sampleDataBuf = NULL;
return -1;
}
@@ -5306,14 +6512,11 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
{
int32_t affectedRows;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
-
- verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
- __func__, __LINE__, pThreadInfo->buffer);
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
uint16_t iface;
- if (superTblInfo)
- iface = superTblInfo->iface;
+ if (stbInfo)
+ iface = stbInfo->iface;
else {
if (g_args.iface == INTERFACE_BUT)
iface = TAOSC_IFACE;
@@ -5328,12 +6531,18 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
switch(iface) {
case TAOSC_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
affectedRows = queryDbExec(
pThreadInfo->taos,
pThreadInfo->buffer, INSERT_TYPE, false);
break;
case REST_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
pThreadInfo->buffer, pThreadInfo)) {
affectedRows = -1;
@@ -5344,24 +6553,22 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
}
break;
-#if STMT_IFACE_ENABLED == 1
case STMT_IFACE:
debugPrint("%s() LN%d, stmt=%p",
__func__, __LINE__, pThreadInfo->stmt);
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
- errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
+ errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
- exit(-1);
+ exit(EXIT_FAILURE);
}
affectedRows = k;
break;
-#endif
default:
- errorPrint("%s() LN%d: unknown insert mode: %d\n",
- __func__, __LINE__, superTblInfo->iface);
+ errorPrint2("%s() LN%d: unknown insert mode: %d\n",
+ __func__, __LINE__, stbInfo->iface);
affectedRows = 0;
}
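`execInsert()` picks the transport per batch: a super table's own `iface` wins; otherwise the global setting applies, with the "undecided" sentinel defaulting to the native client. A sketch of that selection, using the enum names as they appear in this diff:

```c
/* Interface codes as used in this diff; INTERFACE_BUT is the
   "not chosen yet" sentinel. */
enum iface { TAOSC_IFACE, REST_IFACE, STMT_IFACE, INTERFACE_BUT };

static enum iface chooseIface(const enum iface *stbIface,
                              enum iface globalIface) {
    if (stbIface) {
        return *stbIface;                       /* per-stable override */
    }
    return (globalIface == INTERFACE_BUT) ? TAOSC_IFACE : globalIface;
}
```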
@@ -5371,24 +6578,24 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
static void getTableName(char *pTblName,
threadInfo* pThreadInfo, uint64_t tableSeq)
{
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- if (superTblInfo) {
- if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) {
- if (superTblInfo->childTblLimit > 0) {
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ if (stbInfo) {
+ if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) {
+ if (stbInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
- superTblInfo->childTblName +
- (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
+ stbInfo->childTblName +
+ (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
- superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
+ stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
- superTblInfo->childTblPrefix, tableSeq);
+ stbInfo->childTblPrefix, tableSeq);
}
} else {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
@@ -5414,8 +6621,8 @@ static int32_t generateDataTailWithoutStb(
int64_t retLen = 0;
- char **data_type = g_args.datatype;
- int lenOfBinary = g_args.len_of_binary;
+ char *data_type = g_args.data_type;
+ int lenOfBinary = g_args.binwidth;
if (g_args.disorderRatio) {
retLen = generateData(data, data_type,
@@ -5469,7 +6676,7 @@ static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
}
static int32_t generateStbDataTail(
- SSuperTable* superTblInfo,
+ SSuperTable* stbInfo,
uint32_t batch, char* buffer,
int64_t remainderBufLen, int64_t insertRows,
uint64_t recordFrom, int64_t startTime,
@@ -5479,7 +6686,7 @@ static int32_t generateStbDataTail(
char *pstr = buffer;
bool tsRand;
- if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
+ if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
tsRand = true;
} else {
tsRand = false;
@@ -5494,26 +6701,26 @@ static int32_t generateStbDataTail(
int64_t lenOfRow = 0;
if (tsRand) {
- if (superTblInfo->disorderRatio > 0) {
- lenOfRow = generateStbRowData(superTblInfo, data,
+ if (stbInfo->disorderRatio > 0) {
+ lenOfRow = generateStbRowData(stbInfo, data,
remainderBufLen,
startTime + getTSRandTail(
- superTblInfo->timeStampStep, k,
- superTblInfo->disorderRatio,
- superTblInfo->disorderRange)
+ stbInfo->timeStampStep, k,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange)
);
} else {
- lenOfRow = generateStbRowData(superTblInfo, data,
+ lenOfRow = generateStbRowData(stbInfo, data,
remainderBufLen,
- startTime + superTblInfo->timeStampStep * k
+ startTime + stbInfo->timeStampStep * k
);
}
} else {
lenOfRow = getRowDataFromSample(
data,
(remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE,
- startTime + superTblInfo->timeStampStep * k,
- superTblInfo,
+ startTime + stbInfo->timeStampStep * k,
+ stbInfo,
pSamplePos);
}
@@ -5569,7 +6776,7 @@ static int generateSQLHeadWithoutStb(char *tableName,
}
static int generateStbSQLHead(
- SSuperTable* superTblInfo,
+ SSuperTable* stbInfo,
char *tableName, int64_t tableSeq,
char *dbName,
char *buffer, int remainderBufLen)
@@ -5578,17 +6785,17 @@ static int generateStbSQLHead(
char headBuf[HEAD_BUFF_LEN];
- if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) {
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
char* tagsValBuf = NULL;
- if (0 == superTblInfo->tagSource) {
- tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq);
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
} else {
tagsValBuf = getTagValueFromTagSample(
- superTblInfo,
- tableSeq % superTblInfo->tagSampleCount);
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -5600,10 +6807,10 @@ static int generateStbSQLHead(
dbName,
tableName,
dbName,
- superTblInfo->sTblName,
+ stbInfo->stbName,
tagsValBuf);
tmfree(tagsValBuf);
- } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) {
+ } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) {
len = snprintf(
headBuf,
HEAD_BUFF_LEN,
@@ -5628,12 +6835,12 @@ static int generateStbSQLHead(
}
static int32_t generateStbInterlaceData(
- SSuperTable *superTblInfo,
+ threadInfo *pThreadInfo,
char *tableName, uint32_t batchPerTbl,
uint64_t i,
uint32_t batchPerTblTimes,
uint64_t tableSeq,
- threadInfo *pThreadInfo, char *buffer,
+ char *buffer,
int64_t insertRows,
int64_t startTime,
uint64_t *pRemainderBufLen)
@@ -5641,8 +6848,9 @@ static int32_t generateStbInterlaceData(
assert(buffer);
char *pstr = buffer;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
int headLen = generateStbSQLHead(
- superTblInfo,
+ stbInfo,
tableName, tableSeq, pThreadInfo->db_name,
pstr, *pRemainderBufLen);
@@ -5662,12 +6870,12 @@ static int32_t generateStbInterlaceData(
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
- if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
+ if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
startTime = taosGetTimestamp(pThreadInfo->time_precision);
}
int32_t k = generateStbDataTail(
- superTblInfo,
+ stbInfo,
batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
startTime,
&(pThreadInfo->samplePos), &dataLen);
@@ -5729,222 +6937,495 @@ static int64_t generateInterlaceDataWithoutStb(
return k;
}
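The interlace generators above rotate across child tables, writing `interlaceRows` rows per table per pass; `interlaceRows == 0` means progressive mode, where one table is filled completely before the next begins. A small sketch of the two visiting orders:

```c
#include <stdio.h>

/* Print the row ranges each mode would write, for illustration. */
static void planBatches(int tables, int rowsPerTable, int interlaceRows) {
    if (interlaceRows == 0) {                      /* progressive */
        for (int t = 0; t < tables; t++)
            printf("table %d: rows 0..%d\n", t, rowsPerTable - 1);
        return;
    }
    for (int from = 0; from < rowsPerTable; from += interlaceRows)
        for (int t = 0; t < tables; t++) {         /* interlace */
            int to = from + interlaceRows - 1;
            if (to > rowsPerTable - 1) to = rowsPerTable - 1;
            printf("table %d: rows %d..%d\n", t, from, to);
        }
}
```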
-#if STMT_IFACE_ENABLED == 1
-static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind,
- char *dataType, int32_t dataLen, char **ptr, char *value)
+static int32_t prepareStmtBindArrayByType(
+ TAOS_BIND *bind,
+ char data_type, int32_t dataLen,
+ int32_t timePrec,
+ char *value)
{
- if (0 == strncasecmp(dataType,
- "BINARY", strlen("BINARY"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_binary = (char *)*ptr;
-
- bind->buffer_type = TSDB_DATA_TYPE_BINARY;
- if (value) {
- strncpy(bind_binary, value, strlen(value));
- bind->buffer_length = strlen(bind_binary);
- } else {
- rand_string(bind_binary, dataLen);
- bind->buffer_length = dataLen;
- }
+ int32_t *bind_int;
+ int64_t *bind_bigint;
+ float *bind_float;
+ double *bind_double;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+ int16_t *bind_smallint;
+ int8_t *bind_tinyint;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary;
- bind->length = &bind->buffer_length;
- bind->buffer = bind_binary;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ bind_binary = calloc(1, strlen(value) + 1);
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ bind_binary = calloc(1, dataLen + 1);
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "NCHAR", strlen("NCHAR"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_nchar = (char *)*ptr;
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
+ break;
- bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
- if (value) {
- strncpy(bind_nchar, value, strlen(value));
- } else {
- rand_string(bind_nchar, dataLen);
- }
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar;
- bind->buffer_length = strlen(bind_nchar);
- bind->buffer = bind_nchar;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ bind_nchar = calloc(1, strlen(value) + 1);
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ bind_nchar = calloc(1, dataLen + 1);
+ rand_string(bind_nchar, dataLen);
+ }
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "INT", strlen("INT"))) {
- int32_t *bind_int = (int32_t *)*ptr;
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_int = atoi(value);
- } else {
- *bind_int = rand_int();
- }
- bind->buffer_type = TSDB_DATA_TYPE_INT;
- bind->buffer_length = sizeof(int32_t);
- bind->buffer = bind_int;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_INT:
+ bind_int = malloc(sizeof(int32_t));
+ assert(bind_int);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "BIGINT", strlen("BIGINT"))) {
- int64_t *bind_bigint = (int64_t *)*ptr;
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_bigint = atoll(value);
- } else {
- *bind_bigint = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_bigint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = malloc(sizeof(int64_t));
+ assert(bind_bigint);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "FLOAT", strlen("FLOAT"))) {
- float *bind_float = (float *) *ptr;
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_float = (float)atof(value);
- } else {
- *bind_float = rand_float();
- }
- bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
- bind->buffer_length = sizeof(float);
- bind->buffer = bind_float;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = malloc(sizeof(float));
+ assert(bind_float);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "DOUBLE", strlen("DOUBLE"))) {
- double *bind_double = (double *)*ptr;
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_double = atof(value);
- } else {
- *bind_double = rand_double();
- }
- bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
- bind->buffer_length = sizeof(double);
- bind->buffer = bind_double;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = malloc(sizeof(double));
+ assert(bind_double);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "SMALLINT", strlen("SMALLINT"))) {
- int16_t *bind_smallint = (int16_t *)*ptr;
+ if (value) {
+ *bind_double = atof(value);
+ } else {
+ *bind_double = rand_double();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_smallint = (int16_t)atoi(value);
- } else {
- *bind_smallint = rand_smallint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
- bind->buffer_length = sizeof(int16_t);
- bind->buffer = bind_smallint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = malloc(sizeof(int16_t));
+ assert(bind_smallint);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "TINYINT", strlen("TINYINT"))) {
- int8_t *bind_tinyint = (int8_t *)*ptr;
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_tinyint = (int8_t)atoi(value);
- } else {
- *bind_tinyint = rand_tinyint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_tinyint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "BOOL", strlen("BOOL"))) {
- int8_t *bind_bool = (int8_t *)*ptr;
-
- *bind_bool = rand_bool();
- bind->buffer_type = TSDB_DATA_TYPE_BOOL;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_bool;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = malloc(sizeof(int8_t));
+ assert(bind_tinyint);
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "TIMESTAMP", strlen("TIMESTAMP"))) {
- int64_t *bind_ts2 = (int64_t *) *ptr;
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_ts2 = atoll(value);
- } else {
- *bind_ts2 = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_ts2;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = malloc(sizeof(int8_t));
+ assert(bind_bool);
- *ptr += bind->buffer_length;
- } else {
- errorPrint( "No support data type: %s\n", dataType);
- return -1;
+ if (value) {
+ if (0 == strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
+ }
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = malloc(sizeof(int64_t));
+ assert(bind_ts2);
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+ free(bind_ts2);
+                        errorPrint2("Invalid time format in input: %s\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
+ }
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+            errorPrint2("Unsupported data type: %d\n", data_type);
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
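+// Same per-type dispatch as prepareStmtBindArrayByType(), but the value
+// buffers live in the caller's scratch area advanced through *ptr instead
+// of being malloc'ed per bind.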
+static int32_t prepareStmtBindArrayByTypeForRand(
+ TAOS_BIND *bind,
+ char data_type, int32_t dataLen,
+ int32_t timePrec,
+ char **ptr,
+ char *value)
+{
+ int32_t *bind_int;
+ int64_t *bind_bigint;
+ float *bind_float;
+ double *bind_double;
+ int16_t *bind_smallint;
+ int8_t *bind_tinyint;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+                errorPrint2("binary length overflow, max size: %u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary = (char *)*ptr;
+
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ strncpy(bind_binary, value, strlen(value));
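+                // strncpy() copies no terminator; the scratch buffer is
+                // zero-filled by the caller, so strlen() below is bounded.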
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
+
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size: %u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar = (char *)*ptr;
+
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ rand_string(bind_nchar, dataLen);
+ }
+
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ bind_int = (int32_t *)*ptr;
+
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = (int64_t *)*ptr;
+
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = (float *)*ptr;
+
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = (double *)*ptr;
+
+ if (value) {
+ *bind_double = atof(value);
+ } else {
+ *bind_double = rand_double();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = (int16_t *)*ptr;
+
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = (int8_t *)*ptr;
+
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = (int8_t *)*ptr;
+
+ if (value) {
+                if (0 == strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
+ }
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = (int64_t *)*ptr;
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+                    errorPrint2("Invalid time format in input: %s\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
+ }
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ default:
+            errorPrint2("Unsupported data type: %d\n", data_type);
+ return -1;
}
return 0;
}
static int32_t prepareStmtWithoutStb(
- TAOS_STMT *stmt,
+ threadInfo *pThreadInfo,
char *tableName,
uint32_t batch,
int64_t insertRows,
int64_t recordFrom,
int64_t startTime)
{
+ TAOS_STMT *stmt = pThreadInfo->stmt;
int ret = taos_stmt_set_tbname(stmt, tableName);
if (ret != 0) {
- errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
+ errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
tableName, ret, taos_stmt_errstr(stmt));
return ret;
}
- char **data_type = g_args.datatype;
+ char *data_type = g_args.data_type;
- char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1));
+ char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1));
if (bindArray == NULL) {
- errorPrint("Failed to allocate %d bind params\n",
- (g_args.num_of_CPR + 1));
+ errorPrint2("Failed to allocate %d bind params\n",
+ (g_args.columnCount + 1));
return -1;
}
int32_t k = 0;
for (k = 0; k < batch;) {
/* columnCount + 1 (ts) */
- char data[MAX_DATA_SIZE];
- memset(data, 0, MAX_DATA_SIZE);
- char *ptr = data;
TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0);
- int64_t *bind_ts;
+ int64_t *bind_ts = pThreadInfo->bind_ts;
- bind_ts = (int64_t *)ptr;
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
if (g_args.disorderRatio) {
@@ -5960,27 +7441,26 @@ static int32_t prepareStmtWithoutStb(
bind->length = &bind->buffer_length;
bind->is_null = NULL;
- ptr += bind->buffer_length;
-
- for (int i = 0; i < g_args.num_of_CPR; i ++) {
+ for (int i = 0; i < g_args.columnCount; i ++) {
bind = (TAOS_BIND *)((char *)bindArray
+ (sizeof(TAOS_BIND) * (i + 1)));
if ( -1 == prepareStmtBindArrayByType(
bind,
data_type[i],
- g_args.len_of_binary,
- &ptr, NULL)) {
+ g_args.binwidth,
+ pThreadInfo->time_precision,
+ NULL)) {
return -1;
}
}
if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
// if msg > 3MB, break
if (0 != taos_stmt_add_batch(stmt)) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
@@ -5996,146 +7476,101 @@ static int32_t prepareStmtWithoutStb(
return k;
}
-static int32_t prepareStbStmtBind(
- char *bindArray, SSuperTable *stbInfo, bool sourceRand,
- int64_t startTime, int32_t recSeq,
- bool isColumn)
+static int32_t prepareStbStmtBindTag(
+ char *bindArray, SSuperTable *stbInfo,
+ char *tagsVal,
+ int32_t timePrec)
{
- char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
- if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
- __func__, __LINE__, g_args.len_of_binary);
- return -1;
+ TAOS_BIND *tag;
+
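+    // NOTE: tagsVal is not consumed yet; passing NULL below makes
+    // prepareStmtBindArrayByType() generate a random value for each tag.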
+ for (int t = 0; t < stbInfo->tagCount; t ++) {
+ tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t));
+ if ( -1 == prepareStmtBindArrayByType(
+ tag,
+ stbInfo->tags[t].data_type,
+ stbInfo->tags[t].dataLen,
+ timePrec,
+ NULL)) {
+ return -1;
+ }
}
+ return 0;
+}
+
+static int32_t prepareStbStmtBindRand(
+ int64_t *ts,
+ char *bindArray, SSuperTable *stbInfo,
+ int64_t startTime, int32_t recSeq,
+ int32_t timePrec)
+{
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
char *ptr = data;
TAOS_BIND *bind;
- if (isColumn) {
- int cursor = 0;
-
- for (int i = 0; i < stbInfo->columnCount + 1; i ++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
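+    // Column 0 reuses the caller's persistent timestamp slot; the other
+    // columns are materialized into the local scratch buffer via ptr.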
+ for (int i = 0; i < stbInfo->columnCount + 1; i ++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
- if (i == 0) {
- int64_t *bind_ts;
+ if (i == 0) {
+ int64_t *bind_ts = ts;
- bind_ts = (int64_t *)ptr;
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- if (stbInfo->disorderRatio) {
- *bind_ts = startTime + getTSRandTail(
- stbInfo->timeStampStep, recSeq,
- stbInfo->disorderRatio,
- stbInfo->disorderRange);
- } else {
- *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
- }
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_ts;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
-
- ptr += bind->buffer_length;
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ if (stbInfo->disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ stbInfo->timeStampStep, recSeq,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
} else {
-
- if (sourceRand) {
- if ( -1 == prepareStmtBindArrayByType(
- bind,
- stbInfo->columns[i-1].dataType,
- stbInfo->columns[i-1].dataLen,
- &ptr,
- NULL)) {
- free(bindBuffer);
- return -1;
- }
- } else {
- char *restStr = stbInfo->sampleDataBuf + cursor;
- int lengthOfRest = strlen(restStr);
-
- int index = 0;
- for (index = 0; index < lengthOfRest; index ++) {
- if (restStr[index] == ',') {
- break;
- }
- }
-
- memset(bindBuffer, 0, g_args.len_of_binary);
- strncpy(bindBuffer, restStr, index);
- cursor += index + 1; // skip ',' too
-
- if ( -1 == prepareStmtBindArrayByType(
- bind,
- stbInfo->columns[i-1].dataType,
- stbInfo->columns[i-1].dataLen,
- &ptr,
- bindBuffer)) {
- free(bindBuffer);
- return -1;
- }
- }
- }
- }
- } else {
- TAOS_BIND *tag;
-
- for (int t = 0; t < stbInfo->tagCount; t ++) {
- tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t));
- if ( -1 == prepareStmtBindArrayByType(
- tag,
- stbInfo->tags[t].dataType,
- stbInfo->tags[t].dataLen,
- &ptr,
- NULL)) {
- free(bindBuffer);
- return -1;
- }
+ *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
+ }
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ ptr += bind->buffer_length;
+ } else if ( -1 == prepareStmtBindArrayByTypeForRand(
+ bind,
+ stbInfo->columns[i-1].data_type,
+ stbInfo->columns[i-1].dataLen,
+ timePrec,
+ &ptr,
+ NULL)) {
+ return -1;
}
-
}
- free(bindBuffer);
return 0;
}
-static int32_t prepareStbStmt(
- SSuperTable *stbInfo,
- TAOS_STMT *stmt,
+UNUSED_FUNC static int32_t prepareStbStmtRand(
+ threadInfo *pThreadInfo,
char *tableName,
int64_t tableSeq,
uint32_t batch,
uint64_t insertRows,
uint64_t recordFrom,
- int64_t startTime,
- int64_t *pSamplePos)
+ int64_t startTime)
{
int ret;
-
- bool sourceRand;
- if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
- sourceRand = true;
- } else {
- sourceRand = false; // from sample data file
- }
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
char* tagsValBuf = NULL;
- bool tagRand;
if (0 == stbInfo->tagSource) {
- tagRand = true;
tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
} else {
- tagRand = false;
tagsValBuf = getTagValueFromTagSample(
stbInfo,
tableSeq % stbInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6143,13 +7578,14 @@ static int32_t prepareStbStmt(
char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
if (NULL == tagsArray) {
tmfree(tagsValBuf);
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
- if (-1 == prepareStbStmtBind(
- tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) {
+ if (-1 == prepareStbStmtBindTag(
+ tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
+ /* is tag */)) {
tmfree(tagsValBuf);
tmfree(tagsArray);
return -1;
@@ -6161,14 +7597,14 @@ static int32_t prepareStbStmt(
tmfree(tagsArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
} else {
ret = taos_stmt_set_tbname(stmt, tableName);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6176,7 +7612,7 @@ static int32_t prepareStbStmt(
char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
__func__, __LINE__, (stbInfo->columnCount + 1));
return -1;
}
@@ -6184,14 +7620,18 @@ static int32_t prepareStbStmt(
uint32_t k;
for (k = 0; k < batch;) {
/* columnCount + 1 (ts) */
- if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand,
- startTime, k, true /* is column */)) {
+ if (-1 == prepareStbStmtBindRand(
+ pThreadInfo->bind_ts,
+ bindArray, stbInfo,
+ startTime, k,
+ pThreadInfo->time_precision
+ /* is column */)) {
free(bindArray);
return -1;
}
ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6199,7 +7639,7 @@ static int32_t prepareStbStmt(
// if msg > 3MB, break
ret = taos_stmt_add_batch(stmt);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6208,10 +7648,6 @@ static int32_t prepareStbStmt(
k++;
recordFrom ++;
- if (!sourceRand) {
- (*pSamplePos) ++;
- }
-
if (recordFrom >= insertRows) {
break;
}
@@ -6221,9 +7657,9 @@ static int32_t prepareStbStmt(
return k;
}
-static int32_t prepareStbStmtInterlace(
- SSuperTable *stbInfo,
- TAOS_STMT *stmt,
+#if STMT_BIND_PARAM_BATCH == 1
+static int execBindParamBatch(
+ threadInfo *pThreadInfo,
char *tableName,
int64_t tableSeq,
uint32_t batch,
@@ -6232,143 +7668,1269 @@ static int32_t prepareStbStmtInterlace(
int64_t startTime,
int64_t *pSamplePos)
{
- return prepareStbStmt(
- stbInfo,
- stmt,
- tableName,
- tableSeq,
- batch,
- insertRows, 0, startTime,
- pSamplePos);
-}
+ int ret;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
-static int32_t prepareStbStmtProgressive(
- SSuperTable *stbInfo,
- TAOS_STMT *stmt,
- char *tableName,
- int64_t tableSeq,
- uint32_t batch,
- uint64_t insertRows,
- uint64_t recordFrom,
- int64_t startTime,
- int64_t *pSamplePos)
-{
- return prepareStbStmt(
- stbInfo,
- stmt,
- tableName,
- tableSeq,
- g_args.num_of_RPR,
- insertRows, recordFrom, startTime,
- pSamplePos);
-}
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+    uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
-#endif
+ uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos);
-static int32_t generateStbProgressiveData(
- SSuperTable *superTblInfo,
- char *tableName,
- int64_t tableSeq,
- char *dbName, char *buffer,
- int64_t insertRows,
- uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos,
- int64_t *pRemainderBufLen)
-{
- assert(buffer != NULL);
- char *pstr = buffer;
+ if (thisBatch > batch) {
+ thisBatch = batch;
+ }
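+    // Cap the batch so the per-column buffers never read past the end of
+    // the MAX_SAMPLES-row sample arrays; *pSamplePos wraps to 0 below.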
+ verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n",
+ __func__, __LINE__, batch, *pSamplePos, thisBatch);
- memset(pstr, 0, *pRemainderBufLen);
+ memset(pThreadInfo->bindParams, 0,
+ (sizeof(TAOS_MULTI_BIND) * (columnCount + 1)));
+ memset(pThreadInfo->is_null, 0, thisBatch);
- int64_t headLen = generateStbSQLHead(
- superTblInfo,
- tableName, tableSeq, dbName,
- buffer, *pRemainderBufLen);
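+    // Column 0 carries the timestamp array; every other column points
+    // straight into the pre-converted sample arrays at the current sample
+    // position, so no per-row copy is needed here.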
+ for (int c = 0; c < columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+
+ char data_type;
+
+ if (c == 0) {
+ data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = pThreadInfo->bind_ts_array;
+
+ } else {
+ data_type = (stbInfo)?stbInfo->columns[c-1].data_type:g_args.data_type[c-1];
+
+ char *tmpP;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ param->buffer_length =
+ ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth);
+
+ tmpP =
+ (char *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray
+ +sizeof(char*)*(c-1)));
+
+ verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%d position=%"PRId64"\n",
+ __func__, __LINE__, tmpP, *pSamplePos,
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)),
+ (*pSamplePos) *
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)));
+
+ param->buffer = (void *)(tmpP + *pSamplePos *
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth))
+ );
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ param->buffer_length = sizeof(int32_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int32_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(
+ stbInfo->sampleBindBatchArray
+ +sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen*(*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(
+ g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int8_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ param->buffer_length = sizeof(int16_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int16_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int64_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int8_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ param->buffer_length = sizeof(float);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(float)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ param->buffer_length = sizeof(double);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(double)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int64_t)*(*pSamplePos));
+ break;
+
+ default:
+                errorPrint("%s() LN%d, unsupported data type: %d\n",
+ __func__,
+ __LINE__,
+ data_type);
+ exit(EXIT_FAILURE);
+
+ }
+ }
+
+ param->buffer_type = data_type;
+ param->length = malloc(sizeof(int32_t) * thisBatch);
+ assert(param->length);
+
+ for (int b = 0; b < thisBatch; b++) {
+ if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) {
+ param->length[b] = strlen(
+ (char *)param->buffer + b *
+                    ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)
+ );
+ } else {
+ param->length[b] = param->buffer_length;
+ }
+ }
+ param->is_null = pThreadInfo->is_null;
+ param->num = thisBatch;
+ }
+
+ uint32_t k;
+ for (k = 0; k < thisBatch;) {
+ /* columnCount + 1 (ts) */
+ if (stbInfo->disorderRatio) {
+ *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail(
+ stbInfo->timeStampStep, k,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k;
+ }
+
+ debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n",
+ __func__, __LINE__,
+ k, *(pThreadInfo->bind_ts_array +k));
+ k++;
+ recordFrom ++;
+
+ (*pSamplePos) ++;
+ if ((*pSamplePos) == MAX_SAMPLES) {
+ *pSamplePos = 0;
+ }
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams);
+ if (0 != ret) {
+        errorPrint2("%s() LN%d, stmt_bind_param_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+
+    for (int c = 0; c < columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+ free(param->length);
+ }
+
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ return k;
+}
+
+static int parseSamplefileToStmtBatch(
+ SSuperTable* stbInfo)
+{
+ // char *sampleDataBuf = (stbInfo)?
+ // stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleBindBatchArray = NULL;
+
+ if (stbInfo) {
+ stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = stbInfo->sampleBindBatchArray;
+ } else {
+ g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = g_sampleBindBatchArray;
+ }
+ assert(sampleBindBatchArray);
+
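+    // First pass: allocate one value array per column, sized to hold
+    // MAX_SAMPLES converted sample values.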
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c];
+
+ char *tmpP = NULL;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ tmpP = calloc(1, sizeof(int) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ tmpP = calloc(1, sizeof(float) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmpP = calloc(1, sizeof(double) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = calloc(1, MAX_SAMPLES *
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)));
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ default:
+ errorPrint("Unknown data type: %s\n",
+ (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
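+    // Second pass: split each sample row on commas and convert every
+    // field into its column's value array.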
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c].data_type:
+ g_args.data_type[c];
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *tmpStr = calloc(1, index + 1);
+ if (NULL == tmpStr) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, index + 1);
+ return -1;
+ }
+
+ strncpy(tmpStr, restStr, index);
+ cursor += index + 1; // skip ',' too
+ char *tmpP;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int32_t)*i)) =
+ atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(float)*i)) =
+ (float)atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(double)*i)) =
+ atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int16_t)*i)) =
+ (int16_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c));
+ strcpy(tmpP + i*
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))
+ , tmpStr);
+ break;
+
+ default:
+ break;
+ }
+
+ free(tmpStr);
+ }
+ }
+
+ return 0;
+}
+
+static int parseSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+
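+    // Per-thread buffers are allocated once here and reused for every
+    // batch bound in execBindParamBatch().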
+ pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch);
+ assert(pThreadInfo->bind_ts_array);
+
+ pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1));
+ assert(pThreadInfo->bindParams);
+
+ pThreadInfo->is_null = malloc(batch);
+ assert(pThreadInfo->is_null);
+
+ return 0;
+}
+
+static int parseStbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+}
+
+static int parseNtbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, NULL, timePrec, batch);
+}
+
+#else
+static int parseSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ pThreadInfo->sampleBindArray =
+ calloc(1, sizeof(char *) * MAX_SAMPLES);
+ if (pThreadInfo->sampleBindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
+ __func__, __LINE__,
+ (uint64_t)sizeof(char *) * MAX_SAMPLES);
+ return -1;
+ }
+
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
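+    // Pre-convert every sample row into a ready-made TAOS_BIND array;
+    // execBindParam() later patches only the timestamp (column 0).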
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ char *bindArray =
+ calloc(1, sizeof(TAOS_BIND) * (columnCount + 1));
+ if (bindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
+ __func__, __LINE__, (columnCount + 1));
+ return -1;
+ }
+
+ TAOS_BIND *bind;
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount + 1; c++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c));
+
+ if (c == 0) {
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = NULL; //bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ } else {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c-1].data_type:
+ g_args.data_type[c-1];
+ int32_t dataLen = (stbInfo)?
+ stbInfo->columns[c-1].dataLen:
+ g_args.binwidth;
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *bindBuffer = calloc(1, index + 1);
+ if (bindBuffer == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, index + 1);
+ return -1;
+ }
+
+ strncpy(bindBuffer, restStr, index);
+ cursor += index + 1; // skip ',' too
+
+ if (-1 == prepareStmtBindArrayByType(
+ bind,
+ data_type,
+ dataLen,
+ timePrec,
+ bindBuffer)) {
+ free(bindBuffer);
+ return -1;
+ }
+ free(bindBuffer);
+ }
+ }
+ *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) =
+ (uintptr_t)bindArray;
+ }
+
+ return 0;
+}
+
+static int parseStbSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ stbInfo, timePrec);
+}
+
+static int parseNtbSampleToStmt(
+ threadInfo *pThreadInfo,
+ uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ NULL,
+ timePrec);
+}
+
+static int32_t prepareStbStmtBindStartTime(
+ char *tableName,
+ int64_t *ts,
+ char *bindArray, SSuperTable *stbInfo,
+ int64_t startTime, int32_t recSeq)
+{
+ TAOS_BIND *bind;
+
+ bind = (TAOS_BIND *)bindArray;
+
+ int64_t *bind_ts = ts;
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ if (stbInfo->disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ stbInfo->timeStampStep, recSeq,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
+ }
+
+ verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n",
+ __func__, __LINE__, tableName, *bind_ts);
+
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ return 0;
+}
+
+static uint32_t execBindParam(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ uint32_t k;
+ for (k = 0; k < batch;) {
+ char *bindArray = (char *)(*((uintptr_t *)
+ (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos))));
+ /* columnCount + 1 (ts) */
+ if (-1 == prepareStbStmtBindStartTime(
+ tableName,
+ pThreadInfo->bind_ts,
+ bindArray, stbInfo,
+ startTime, k
+ /* is column */)) {
+ return -1;
+ }
+ ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+
+ k++;
+ recordFrom ++;
+
+ (*pSamplePos) ++;
+ if ((*pSamplePos) == MAX_SAMPLES) {
+ *pSamplePos = 0;
+ }
+
+ if (recordFrom >= insertRows) {
+ break;
+ }
+ }
+
+ return k;
+}
+#endif
+
+static int32_t prepareStbStmtWithSample(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
+ char* tagsValBuf = NULL;
+
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
+ }
+
+ if (NULL == tagsValBuf) {
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
+ if (NULL == tagsArray) {
+ tmfree(tagsValBuf);
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ if (-1 == prepareStbStmtBindTag(
+ tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
+ /* is tag */)) {
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+ return -1;
+ }
+
+ ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
+
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ } else {
+ ret = taos_stmt_set_tbname(stmt, tableName);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ }
+
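+    // Table name (and tags for auto-created subtables) are set above; the
+    // actual column binding differs by build flavor below.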
+#if STMT_BIND_PARAM_BATCH == 1
+ return execBindParamBatch(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#else
+ return execBindParam(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#endif
+}
+
+static int32_t generateStbProgressiveData(
+ SSuperTable *stbInfo,
+ char *tableName,
+ int64_t tableSeq,
+ char *dbName, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos,
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(pstr, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateStbSQLHead(
+ stbInfo,
+ tableName, tableSeq, dbName,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateStbDataTail(stbInfo,
+ g_args.reqPerReq, pstr, *pRemainderBufLen,
+ insertRows, recordFrom,
+ startTime,
+ pSamplePos, &dataLen);
+}
+
+static int32_t generateProgressiveDataWithoutStb(
+ char *tableName,
+ /* int64_t tableSeq, */
+ threadInfo *pThreadInfo, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(buffer, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateSQLHeadWithoutStb(
+ tableName, pThreadInfo->db_name,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateDataTailWithoutStb(
+ g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom,
+ startTime,
+ /*pSamplePos, */&dataLen);
+}
+
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ if (0 == pThreadInfo->totalDelay)
+ pThreadInfo->totalDelay = 1;
+
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0))
+ );
+}
+
+#if STMT_BIND_PARAM_BATCH == 1
+// stmt sync write interlace data
+static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ uint64_t maxSqlLen;
+ int64_t nTimeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ nTimeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ maxSqlLen = g_args.max_sql_len;
+ nTimeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+    if (interlaceRows > g_args.reqPerReq)
+        interlaceRows = g_args.reqPerReq;
+
+    uint32_t batchPerTbl = interlaceRows;
+    uint32_t batchPerTblTimes;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ g_args.reqPerReq / interlaceRows;
+ } else {
+ batchPerTblTimes = 1;
+ }
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime = pThreadInfo->start_time;
+
+ uint64_t generatedRecPerTbl = 0;
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
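+    // Interlace loop: generate batchPerTbl rows for each table in turn,
+    // cycling through this thread's tables until up to reqPerReq rows are
+    // accumulated, then submit them in a single request.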
+ while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ uint32_t recOfBatch = 0;
+
+ int32_t generated;
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ if (stbInfo) {
+ generated = prepareStbStmtWithSample(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batchPerTbl,
+ insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos));
+ } else {
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batchPerTbl, startTime);
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batchPerTbl,
+ insertRows, i,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ tableSeq ++;
+ recOfBatch += batchPerTbl;
+
+ pThreadInfo->totalInsertRows += batchPerTbl;
+
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
+
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
+
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * nTimeStampStep;
+
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
+
+ int64_t remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
+
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
+
+ startTs = taosGetTimestampUs();
+
+ if (recOfBatch == 0) {
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+                errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less than %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ goto free_of_interlace_stmt;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (recOfBatch != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows);
+ goto free_of_interlace_stmt;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+                uint64_t sleepTime = insert_interval - (et - st);
+                performancePrint("%s() LN%d sleep: %"PRIu64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+#else
+// stmt sync write interlace data
+static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ uint64_t maxSqlLen;
+ int64_t nTimeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ nTimeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ maxSqlLen = g_args.max_sql_len;
+ nTimeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+    if (interlaceRows > g_args.reqPerReq)
+        interlaceRows = g_args.reqPerReq;
+
+    uint32_t batchPerTbl = interlaceRows;
+    uint32_t batchPerTblTimes;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ g_args.reqPerReq / interlaceRows;
+ } else {
+ batchPerTblTimes = 1;
+ }
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime = pThreadInfo->start_time;
+
+ uint64_t generatedRecPerTbl = 0;
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
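+    // Interlace loop: generate batchPerTbl rows for each table in turn,
+    // cycling through this thread's tables until up to reqPerReq rows are
+    // accumulated, then submit them in a single request.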
+ while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ uint32_t recOfBatch = 0;
+
+ int32_t generated;
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ if (stbInfo) {
+ generated = prepareStbStmtWithSample(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batchPerTbl,
+ insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos));
+ } else {
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batchPerTbl, startTime);
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batchPerTbl,
+ insertRows, i,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ tableSeq ++;
+ recOfBatch += batchPerTbl;
+
+ pThreadInfo->totalInsertRows += batchPerTbl;
+
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
+
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
+
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * nTimeStampStep;
+
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
+
+ int64_t remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
+
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
+
+ startTs = taosGetTimestampUs();
+
+ if (recOfBatch == 0) {
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+                errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less than %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ goto free_of_interlace_stmt;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
- if (headLen <= 0) {
- return 0;
- }
- pstr += headLen;
- *pRemainderBufLen -= headLen;
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
- int64_t dataLen;
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
- return generateStbDataTail(superTblInfo,
- g_args.num_of_RPR, pstr, *pRemainderBufLen,
- insertRows, recordFrom,
- startTime,
- pSamplePos, &dataLen);
-}
+ if (recOfBatch != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows);
+ goto free_of_interlace_stmt;
+ }
-static int32_t generateProgressiveDataWithoutStb(
- char *tableName,
- /* int64_t tableSeq, */
- threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
- uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
- int64_t *pRemainderBufLen)
-{
- assert(buffer != NULL);
- char *pstr = buffer;
+ pThreadInfo->totalAffectedRows += affectedRows;
- memset(buffer, 0, *pRemainderBufLen);
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
- int64_t headLen = generateSQLHeadWithoutStb(
- tableName, pThreadInfo->db_name,
- buffer, *pRemainderBufLen);
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
- if (headLen <= 0) {
- return 0;
+ if (insert_interval > (et - st) ) {
+                uint64_t sleepTime = insert_interval - (et - st);
+                performancePrint("%s() LN%d sleep: %"PRIu64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
}
- pstr += headLen;
- *pRemainderBufLen -= headLen;
-
- int64_t dataLen;
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
- return generateDataTailWithoutStb(
- g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom,
- startTime,
- /*pSamplePos, */&dataLen);
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
}
-static void printStatPerThread(threadInfo *pThreadInfo)
-{
- fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows,
- (pThreadInfo->totalDelay)?
- (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)):
- FLT_MAX);
-}
+#endif
// sync write interlace data
-static void* syncWriteInterlace(threadInfo *pThreadInfo) {
+static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
int64_t insertRows;
- uint32_t interlaceRows;
uint64_t maxSqlLen;
int64_t nTimeStampStep;
uint64_t insert_interval;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
-
- if (superTblInfo) {
- insertRows = superTblInfo->insertRows;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
- if ((superTblInfo->interlaceRows == 0)
- && (g_args.interlace_rows > 0)) {
- interlaceRows = g_args.interlace_rows;
- } else {
- interlaceRows = superTblInfo->interlaceRows;
- }
- maxSqlLen = superTblInfo->maxSqlLen;
- nTimeStampStep = superTblInfo->timeStampStep;
- insert_interval = superTblInfo->insertInterval;
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ nTimeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
} else {
- insertRows = g_args.num_of_DPT;
- interlaceRows = g_args.interlace_rows;
+ insertRows = g_args.insertRows;
maxSqlLen = g_args.max_sql_len;
nTimeStampStep = g_args.timestamp_step;
insert_interval = g_args.insert_interval;
@@ -6378,26 +8940,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
-
- if (interlaceRows > insertRows)
- interlaceRows = insertRows;
-
- if (interlaceRows > g_args.num_of_RPR)
- interlaceRows = g_args.num_of_RPR;
+#if 1
+ if (interlaceRows > g_args.reqPerReq)
+ interlaceRows = g_args.reqPerReq;
uint32_t batchPerTbl = interlaceRows;
uint32_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
- g_args.num_of_RPR / interlaceRows;
+ g_args.reqPerReq / interlaceRows;
} else {
batchPerTblTimes = 1;
}
+#else
+ uint32_t batchPerTbl;
+ if (interlaceRows > g_args.reqPerReq)
+ batchPerTbl = g_args.reqPerReq;
+ else
+ batchPerTbl = interlaceRows;
+
+ uint32_t batchPerTblTimes;
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ interlaceRows / batchPerTbl;
+ } else {
+ batchPerTblTimes = 1;
+ }
+#endif
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -6419,11 +8993,15 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
bool flagSleep = true;
uint64_t sleepTimeTotal = 0;
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
st = taosGetTimestampMs();
flagSleep = false;
}
+
// generate data
memset(pThreadInfo->buffer, 0, maxSqlLen);
uint64_t remainderBufLen = maxSqlLen;
@@ -6437,12 +9015,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint32_t recOfBatch = 0;
+ int32_t generated;
for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
char tableName[TSDB_TABLE_NAME_LEN];
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -6450,63 +9029,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint64_t oldRemainderLen = remainderBufLen;
- int32_t generated;
- if (superTblInfo) {
- if (superTblInfo->iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
- generated = prepareStbStmtInterlace(
- superTblInfo,
- pThreadInfo->stmt,
- tableName,
- tableSeq,
- batchPerTbl,
- insertRows, i,
- startTime,
- &(pThreadInfo->samplePos));
-#else
- generated = -1;
-#endif
- } else {
- generated = generateStbInterlaceData(
- superTblInfo,
- tableName, batchPerTbl, i,
- batchPerTblTimes,
- tableSeq,
- pThreadInfo, pstr,
- insertRows,
- startTime,
- &remainderBufLen);
- }
+ if (stbInfo) {
+ generated = generateStbInterlaceData(
+ pThreadInfo,
+ tableName, batchPerTbl, i,
+ batchPerTblTimes,
+ tableSeq,
+ pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
} else {
- if (g_args.iface == STMT_IFACE) {
- debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
- pThreadInfo->threadID,
- __func__, __LINE__,
- tableName, batchPerTbl, startTime);
-#if STMT_IFACE_ENABLED == 1
- generated = prepareStmtWithoutStb(
- pThreadInfo->stmt, tableName,
- batchPerTbl,
- insertRows, i,
- startTime);
-#else
- generated = -1;
-#endif
- } else {
- generated = generateInterlaceDataWithoutStb(
- tableName, batchPerTbl,
- tableSeq,
- pThreadInfo->db_name, pstr,
- insertRows,
- startTime,
- &remainderBufLen);
- }
+ generated = generateInterlaceDataWithoutStb(
+ tableName, batchPerTbl,
+ tableSeq,
+ pThreadInfo->db_name, pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
}
debugPrint("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
- errorPrint("[%d] %s() LN%d, generated records is %d\n",
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
goto free_of_interlace;
} else if (generated == 0) {
@@ -6539,7 +9085,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if ((remainRows > 0) && (batchPerTbl > remainRows))
batchPerTbl = remainRows;
- if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
break;
}
@@ -6547,7 +9093,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__,
generatedRecPerTbl, insertRows);
- if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl)
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
break;
}
@@ -6560,7 +9106,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampUs();
if (recOfBatch == 0) {
- errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl);
if (batchPerTbl > 0) {
@@ -6587,7 +9133,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (recOfBatch != affectedRows) {
- errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, pThreadInfo->buffer);
goto free_of_interlace;
@@ -6595,6 +9141,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalAffectedRows += affectedRows;
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
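
The progress report added above fires only when the integer percentage advances, so a thread prints at most 100 such lines regardless of row count. The same throttling in isolation (a sketch mirroring the locals above):

#include <stdint.h>
#include <stdio.h>

/* Print "[tid]:NN%" only when the whole-number percentage grows. */
static void reportProgress(int threadID, int64_t affectedRows,
                           int64_t totalRows, int *percentComplete) {
    int currentPercent = (int)(affectedRows * 100 / totalRows);
    if (currentPercent > *percentComplete) {
        printf("[%d]:%d%%\n", threadID, currentPercent);
        *percentComplete = currentPercent;
    }
}
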
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
@@ -6616,6 +9167,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
}
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
free_of_interlace:
tmfree(pThreadInfo->buffer);
@@ -6627,18 +9180,18 @@ free_of_interlace:
static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len;
int64_t timeStampStep =
- superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step;
+ stbInfo?stbInfo->timeStampStep:g_args.timestamp_step;
int64_t insertRows =
- (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ (stbInfo)?stbInfo->insertRows:g_args.insertRows;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
__func__, __LINE__, insertRows);
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
@@ -6653,6 +9206,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
for (uint64_t tableSeq = pThreadInfo->start_table_from;
tableSeq <= pThreadInfo->end_table_to;
tableSeq ++) {
@@ -6665,7 +9221,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__,
pThreadInfo->threadID, tableSeq, tableName);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -6680,41 +9236,38 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
+ // measure prepare + insert
+ startTs = taosGetTimestampUs();
+
int32_t generated;
- if (superTblInfo) {
- if (superTblInfo->iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
- generated = prepareStbStmtProgressive(
- superTblInfo,
- pThreadInfo->stmt,
+ if (stbInfo) {
+ if (stbInfo->iface == STMT_IFACE) {
+ generated = prepareStbStmtWithSample(
+ pThreadInfo,
tableName,
tableSeq,
- g_args.num_of_RPR,
+ (g_args.reqPerReq>stbInfo->insertRows)?
+ stbInfo->insertRows:
+ g_args.reqPerReq,
insertRows, i, start_time,
&(pThreadInfo->samplePos));
-#else
- generated = -1;
-#endif
} else {
generated = generateStbProgressiveData(
- superTblInfo,
- tableName, tableSeq, pThreadInfo->db_name, pstr,
+ stbInfo,
+ tableName, tableSeq,
+ pThreadInfo->db_name, pstr,
insertRows, i, start_time,
&(pThreadInfo->samplePos),
&remainderBufLen);
}
} else {
if (g_args.iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
generated = prepareStmtWithoutStb(
- pThreadInfo->stmt,
+ pThreadInfo,
tableName,
- g_args.num_of_RPR,
+ g_args.reqPerReq,
insertRows, i,
start_time);
-#else
- generated = -1;
-#endif
} else {
generated = generateProgressiveDataWithoutStb(
tableName,
@@ -6725,6 +9278,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
&remainderBufLen);
}
}
+
+ verbosePrint("[%d] %s() LN%d generated=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, generated);
+
if (generated > 0)
i += generated;
else
@@ -6733,7 +9291,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
- startTs = taosGetTimestampUs();
+ // only measure insert
+ // startTs = taosGetTimestampUs();
int32_t affectedRows = execInsert(pThreadInfo, generated);
@@ -6751,13 +9310,18 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (affectedRows < 0) {
- errorPrint("%s() LN%d, affected rows: %d\n",
+ errorPrint2("%s() LN%d, affected rows: %d\n",
__func__, __LINE__, affectedRows);
goto free_of_progressive;
}
pThreadInfo->totalAffectedRows += affectedRows;
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
@@ -6769,18 +9333,22 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (i >= insertRows)
break;
- } // num_of_DPT
+ } // insertRows
if ((g_args.verbose_print) &&
- (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo)
+ (tableSeq == pThreadInfo->ntables - 1) && (stbInfo)
&& (0 == strncasecmp(
- superTblInfo->dataSource,
+ stbInfo->dataSource,
"sample", strlen("sample")))) {
verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
__func__, __LINE__, pThreadInfo->samplePos);
}
} // tableSeq
+ if (percentComplete < 100) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+ }
+
free_of_progressive:
tmfree(pThreadInfo->buffer);
printStatPerThread(pThreadInfo);
@@ -6790,27 +9358,33 @@ free_of_progressive:
static void* syncWrite(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
setThreadName("syncWrite");
- uint32_t interlaceRows;
+ uint32_t interlaceRows = 0;
- if (superTblInfo) {
- if ((superTblInfo->interlaceRows == 0)
- && (g_args.interlace_rows > 0)) {
- interlaceRows = g_args.interlace_rows;
- } else {
- interlaceRows = superTblInfo->interlaceRows;
- }
+ if (stbInfo) {
+ if (stbInfo->interlaceRows < stbInfo->insertRows)
+ interlaceRows = stbInfo->interlaceRows;
} else {
- interlaceRows = g_args.interlace_rows;
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
}
if (interlaceRows > 0) {
// interlace mode
- return syncWriteInterlace(pThreadInfo);
- } else {
+ if (((stbInfo) && (STMT_IFACE == stbInfo->iface))
+ || (STMT_IFACE == g_args.iface)) {
+#if STMT_BIND_PARAM_BATCH == 1
+ return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows);
+#else
+ return syncWriteInterlaceStmt(pThreadInfo, interlaceRows);
+#endif
+ } else {
+ return syncWriteInterlace(pThreadInfo, interlaceRows);
+ }
+    } else {
// progressive mode
return syncWriteProgressive(pThreadInfo);
}
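
To summarize the routing above (a reading aid, not code from the patch): interlace mode now has three entry points and progressive mode keeps one.

/* syncWrite() routing, given the interlaceRows computed above:
 *   interlaceRows > 0, stmt iface, STMT_BIND_PARAM_BATCH == 1
 *       -> syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows)
 *   interlaceRows > 0, stmt iface, STMT_BIND_PARAM_BATCH != 1
 *       -> syncWriteInterlaceStmt(pThreadInfo, interlaceRows)
 *   interlaceRows > 0, any other iface
 *       -> syncWriteInterlace(pThreadInfo, interlaceRows)
 *   interlaceRows == 0 (including interlaceRows >= insertRows)
 *       -> syncWriteProgressive(pThreadInfo)
 */
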
@@ -6818,10 +9392,10 @@ static void* syncWrite(void *sarg) {
static void callBack(void *param, TAOS_RES *res, int code) {
threadInfo* pThreadInfo = (threadInfo*)param;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
int insert_interval =
- superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+ stbInfo?stbInfo->insertInterval:g_args.insert_interval;
if (insert_interval) {
pThreadInfo->et = taosGetTimestampMs();
if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) {
@@ -6829,14 +9403,14 @@ static void callBack(void *param, TAOS_RES *res, int code) {
}
}
- char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen);
+ char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen);
char data[MAX_DATA_SIZE];
char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values",
+ pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES",
pThreadInfo->db_name, pThreadInfo->tb_prefix,
pThreadInfo->start_table_from);
- // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
- if (pThreadInfo->counter >= g_args.num_of_RPR) {
+ // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) {
+ if (pThreadInfo->counter >= g_args.reqPerReq) {
pThreadInfo->start_table_from++;
pThreadInfo->counter = 0;
}
@@ -6847,17 +9421,17 @@ static void callBack(void *param, TAOS_RES *res, int code) {
return;
}
- for (int i = 0; i < g_args.num_of_RPR; i++) {
+ for (int i = 0; i < g_args.reqPerReq; i++) {
int rand_num = taosRandom() % 100;
- if (0 != pThreadInfo->superTblInfo->disorderRatio
- && rand_num < pThreadInfo->superTblInfo->disorderRatio) {
+ if (0 != pThreadInfo->stbInfo->disorderRatio
+ && rand_num < pThreadInfo->stbInfo->disorderRatio) {
int64_t d = pThreadInfo->lastTs
- - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1);
- generateStbRowData(pThreadInfo->superTblInfo, data,
+ - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1);
+ generateStbRowData(pThreadInfo->stbInfo, data,
MAX_DATA_SIZE,
d);
} else {
- generateStbRowData(pThreadInfo->superTblInfo,
+ generateStbRowData(pThreadInfo->stbInfo,
data,
MAX_DATA_SIZE,
pThreadInfo->lastTs += 1000);
@@ -6865,7 +9439,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
pstr += sprintf(pstr, "%s", data);
pThreadInfo->counter++;
- if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) {
+ if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) {
break;
}
}
@@ -6881,7 +9455,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
static void *asyncWrite(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
setThreadName("asyncWrite");
@@ -6890,7 +9464,7 @@ static void *asyncWrite(void *sarg) {
pThreadInfo->lastTs = pThreadInfo->start_time;
int insert_interval =
- superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
+ stbInfo?stbInfo->insertInterval:g_args.insert_interval;
if (insert_interval) {
pThreadInfo->st = taosGetTimestampMs();
}
@@ -6906,7 +9480,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
uint16_t rest_port = port + TSDB_PORT_HTTP;
struct hostent *server = gethostbyname(host);
if ((server == NULL) || (server->h_addr == NULL)) {
- errorPrint("%s", "ERROR, no such host");
+ errorPrint2("%s", "no such host");
return -1;
}
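
convertHostToServAddr() resolves the REST endpoint once up front. A sketch of the conversion, assuming the usual BSD gethostbyname/sockaddr_in pattern (the exact field handling in taosdemo may differ slightly):

#include <netdb.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

/* Resolve host into a sockaddr_in; REST requests later target
 * port + TSDB_PORT_HTTP, as the function above does. */
static int resolveHost(const char *host, uint16_t restPort,
                       struct sockaddr_in *servAddr) {
    struct hostent *server = gethostbyname(host);
    if ((server == NULL) || (server->h_addr == NULL))
        return -1;                      /* "no such host" */

    memset(servAddr, 0, sizeof(*servAddr));
    servAddr->sin_family = AF_INET;
    servAddr->sin_port = htons(restPort);
    memcpy(&servAddr->sin_addr.s_addr, server->h_addr, server->h_length);
    return 0;
}
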
@@ -6928,7 +9502,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
}
static void startMultiThreadInsertData(int threads, char* db_name,
- char* precision, SSuperTable* superTblInfo) {
+ char* precision, SSuperTable* stbInfo) {
int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
if (0 != precision[0]) {
@@ -6936,119 +9510,126 @@ static void startMultiThreadInsertData(int threads, char* db_name,
timePrec = TSDB_TIME_PRECISION_MILLI;
} else if (0 == strncasecmp(precision, "us", 2)) {
timePrec = TSDB_TIME_PRECISION_MICRO;
-#if NANO_SECOND_ENABLED == 1
} else if (0 == strncasecmp(precision, "ns", 2)) {
timePrec = TSDB_TIME_PRECISION_NANO;
-#endif
} else {
- errorPrint("Not support precision: %s\n", precision);
- exit(-1);
+        errorPrint2("Unsupported time precision: %s\n", precision);
+ exit(EXIT_FAILURE);
}
}
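
Note the diff also drops the NANO_SECOND_ENABLED guard, so "ns" is now always accepted. The mapping reduces to a small helper (a sketch; the TSDB_TIME_PRECISION_* constants come from the TDengine headers):

#include <stdint.h>
#include <strings.h>

/* Map a precision string to a TSDB_TIME_PRECISION_* constant;
 * -1 signals an unsupported value, which the caller treats as fatal. */
static int32_t parsePrecision(const char *precision) {
    if (0 == strncasecmp(precision, "ms", 2)) return TSDB_TIME_PRECISION_MILLI;
    if (0 == strncasecmp(precision, "us", 2)) return TSDB_TIME_PRECISION_MICRO;
    if (0 == strncasecmp(precision, "ns", 2)) return TSDB_TIME_PRECISION_NANO;
    return -1;
}
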
int64_t start_time;
- if (superTblInfo) {
- if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
+ if (stbInfo) {
+ if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
start_time = taosGetTimestamp(timePrec);
} else {
if (TSDB_CODE_SUCCESS != taosParseTime(
- superTblInfo->startTimestamp,
+ stbInfo->startTimestamp,
&start_time,
- strlen(superTblInfo->startTimestamp),
+ strlen(stbInfo->startTimestamp),
timePrec, 0)) {
ERROR_EXIT("failed to parse time!\n");
}
}
} else {
- start_time = 1500000000000;
+ start_time = DEFAULT_START_TIME;
}
debugPrint("%s() LN%d, start_time= %"PRId64"\n",
__func__, __LINE__, start_time);
- int64_t start = taosGetTimestampMs();
-
// read sample data from file first
- if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
- "sample", strlen("sample")))) {
- if (0 != prepareSampleDataForSTable(superTblInfo)) {
- errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
- exit(-1);
- }
+ int ret;
+ if (stbInfo) {
+ ret = prepareSampleForStb(stbInfo);
+ } else {
+ ret = prepareSampleForNtb();
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == taos0) {
- errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
+ errorPrint2("%s() LN%d, connect to server fail , reason: %s\n",
__func__, __LINE__, taos_errstr(NULL));
- exit(-1);
+ exit(EXIT_FAILURE);
}
int64_t ntables = 0;
uint64_t tableFrom;
- if (superTblInfo) {
+ if (stbInfo) {
int64_t limit;
uint64_t offset;
if ((NULL != g_args.sqlFile)
- && (superTblInfo->childTblExists == TBL_NO_EXISTS)
- && ((superTblInfo->childTblOffset != 0)
- || (superTblInfo->childTblLimit >= 0))) {
+ && (stbInfo->childTblExists == TBL_NO_EXISTS)
+ && ((stbInfo->childTblOffset != 0)
+ || (stbInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
- if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
- if ((superTblInfo->childTblLimit < 0)
- || ((superTblInfo->childTblOffset
- + superTblInfo->childTblLimit)
- > (superTblInfo->childTblCount))) {
- superTblInfo->childTblLimit =
- superTblInfo->childTblCount - superTblInfo->childTblOffset;
+ if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) {
+ if ((stbInfo->childTblLimit < 0)
+ || ((stbInfo->childTblOffset
+ + stbInfo->childTblLimit)
+ > (stbInfo->childTblCount))) {
+
+ if (stbInfo->childTblCount < stbInfo->childTblOffset) {
+                    printf("WARNING: offset will not be used since the child table count is less than the offset!\n");
+
+ stbInfo->childTblOffset = 0;
+ }
+ stbInfo->childTblLimit =
+ stbInfo->childTblCount - stbInfo->childTblOffset;
}
- offset = superTblInfo->childTblOffset;
- limit = superTblInfo->childTblLimit;
+ offset = stbInfo->childTblOffset;
+ limit = stbInfo->childTblLimit;
} else {
- limit = superTblInfo->childTblCount;
+ limit = stbInfo->childTblCount;
offset = 0;
}
ntables = limit;
tableFrom = offset;
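
When the child tables already exist, the branch above clamps (offset, limit) so that offset + limit never exceeds childTblCount, and resets an offset that points past the end. A worked sketch of that clamping (hypothetical counts):

#include <stdint.h>
#include <stdio.h>

/* Clamp (offset, limit) against an existing child-table count,
 * mirroring the TBL_ALREADY_EXISTS branch above. */
static void clampLimitOffset(int64_t childTblCount,
                             int64_t *offset, int64_t *limit) {
    if ((*limit < 0) || (*offset + *limit > childTblCount)) {
        if (childTblCount < *offset)
            *offset = 0;                /* offset beyond the end: ignore */
        *limit = childTblCount - *offset;
    }
}

int main(void) {
    int64_t offset = 80, limit = 50;
    clampLimitOffset(100, &offset, &limit);
    printf("offset=%lld limit=%lld\n",   /* -> offset=80 limit=20 */
           (long long)offset, (long long)limit);
    return 0;
}
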
- if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
- && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit )
- > superTblInfo->childTblCount)) {
+ if ((stbInfo->childTblExists != TBL_NO_EXISTS)
+ && ((stbInfo->childTblOffset + stbInfo->childTblLimit)
+ > stbInfo->childTblCount)) {
printf("WARNING: specified offset + limit > child table count!\n");
prompt();
}
- if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
- && (0 == superTblInfo->childTblLimit)) {
+ if ((stbInfo->childTblExists != TBL_NO_EXISTS)
+ && (0 == stbInfo->childTblLimit)) {
printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n");
prompt();
}
- superTblInfo->childTblName = (char*)calloc(1,
+ stbInfo->childTblName = (char*)calloc(1,
limit * TSDB_TABLE_NAME_LEN);
- if (superTblInfo->childTblName == NULL) {
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ if (stbInfo->childTblName == NULL) {
taos_close(taos0);
- exit(-1);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
int64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos0,
- db_name, superTblInfo->sTblName,
- &superTblInfo->childTblName, &childTblCount,
+ db_name, stbInfo->stbName,
+ &stbInfo->childTblName, &childTblCount,
limit,
offset);
+ ntables = childTblCount; // CBD
} else {
- ntables = g_args.num_of_tables;
+ ntables = g_args.ntables;
tableFrom = 0;
}
@@ -7065,11 +9646,11 @@ static void startMultiThreadInsertData(int threads, char* db_name,
b = ntables % threads;
}
- if ((superTblInfo)
- && (superTblInfo->iface == REST_IFACE)) {
+ if ((stbInfo)
+ && (stbInfo->iface == REST_IFACE)) {
if (convertHostToServAddr(
g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
- exit(-1);
+            ERROR_EXIT("failed to convert host to server address\n");
}
}
@@ -7082,94 +9663,147 @@ static void startMultiThreadInsertData(int threads, char* db_name,
memset(pids, 0, threads * sizeof(pthread_t));
memset(infos, 0, threads * sizeof(threadInfo));
+ char *stmtBuffer = calloc(1, BUFFER_SIZE);
+ assert(stmtBuffer);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ uint32_t interlaceRows = 0;
+ uint32_t batch;
+
+ if (stbInfo) {
+ if ((stbInfo->interlaceRows == 0)
+ && (g_args.interlaceRows > 0)
+ ) {
+ interlaceRows = g_args.interlaceRows;
+
+ } else {
+ interlaceRows = stbInfo->interlaceRows;
+ }
+
+ if (interlaceRows > stbInfo->insertRows) {
+ interlaceRows = 0;
+ }
+ } else {
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
+ }
+
+ if (interlaceRows > 0) {
+ batch = interlaceRows;
+ } else {
+ batch = (g_args.reqPerReq>g_args.insertRows)?
+ g_args.insertRows:g_args.reqPerReq;
+ }
+
+#endif
+
+ if ((g_args.iface == STMT_IFACE)
+ || ((stbInfo)
+ && (stbInfo->iface == STMT_IFACE))) {
+ char *pstr = stmtBuffer;
+
+ if ((stbInfo)
+ && (AUTO_CREATE_SUBTBL
+ == stbInfo->autoCreateTable)) {
+ pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?",
+ stbInfo->stbName);
+ for (int tag = 0; tag < (stbInfo->tagCount - 1);
+ tag ++ ) {
+ pstr += sprintf(pstr, ",?");
+ }
+ pstr += sprintf(pstr, ") VALUES(?");
+ } else {
+ pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
+ }
+
+ int columnCount = (stbInfo)?
+ stbInfo->columnCount:
+ g_args.columnCount;
+
+ for (int col = 0; col < columnCount; col ++) {
+ pstr += sprintf(pstr, ",?");
+ }
+ pstr += sprintf(pstr, ")");
+
+        debugPrint("%s() LN%d, stmtBuffer: %s\n", __func__, __LINE__, stmtBuffer);
+#if STMT_BIND_PARAM_BATCH == 1
+ parseSamplefileToStmtBatch(stbInfo);
+#endif
+ }
+
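
The template built above contains one ? for the table name (and one per tag in the auto-create case) plus one per column, and is prepared once per thread. A standalone sketch of the string construction (tagCount/columnCount are illustrative; buf must be sized like the patch's BUFFER_SIZE):

#include <stdio.h>

/* Build "INSERT INTO ? [USING stb TAGS(?,...)] VALUES(?,...)" the same
 * way the block above does. */
static void buildStmtSql(char *buf, const char *stbName,
                         int tagCount, int columnCount, int autoCreate) {
    char *pstr = buf;
    if (autoCreate && stbName) {
        pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", stbName);
        for (int tag = 0; tag < tagCount - 1; tag++)
            pstr += sprintf(pstr, ",?");
        pstr += sprintf(pstr, ") VALUES(?");
    } else {
        pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
    }
    for (int col = 0; col < columnCount; col++)
        pstr += sprintf(pstr, ",?");
    sprintf(pstr, ")");
}

/* buildStmtSql(buf, "meters", 2, 3, 1) yields:
 *   INSERT INTO ? USING meters TAGS(?,?) VALUES(?,?,?,?)
 * where the first VALUES slot is the timestamp. */
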
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->time_precision = timePrec;
- pThreadInfo->superTblInfo = superTblInfo;
+ pThreadInfo->stbInfo = stbInfo;
pThreadInfo->start_time = start_time;
pThreadInfo->minDelay = UINT64_MAX;
- if ((NULL == superTblInfo) ||
- (superTblInfo->iface != REST_IFACE)) {
+ if ((NULL == stbInfo) ||
+ (stbInfo->iface != REST_IFACE)) {
//t_info->taos = taos;
pThreadInfo->taos = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == pThreadInfo->taos) {
- errorPrint(
+ free(infos);
+ errorPrint2(
"%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
- free(infos);
- exit(-1);
+ exit(EXIT_FAILURE);
}
-#if STMT_IFACE_ENABLED == 1
if ((g_args.iface == STMT_IFACE)
- || ((superTblInfo)
- && (superTblInfo->iface == STMT_IFACE))) {
-
- int columnCount;
- if (superTblInfo) {
- columnCount = superTblInfo->columnCount;
- } else {
- columnCount = g_args.num_of_CPR;
- }
+ || ((stbInfo)
+ && (stbInfo->iface == STMT_IFACE))) {
pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
if (NULL == pThreadInfo->stmt) {
- errorPrint(
+ free(pids);
+ free(infos);
+ errorPrint2(
"%s() LN%d, failed init stmt, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
+ exit(EXIT_FAILURE);
+ }
+
+                    ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0);
+                    if (0 != ret) {
free(pids);
free(infos);
- exit(-1);
+ free(stmtBuffer);
+ errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
+ ret, taos_stmt_errstr(pThreadInfo->stmt));
+ exit(EXIT_FAILURE);
}
+ pThreadInfo->bind_ts = malloc(sizeof(int64_t));
- char buffer[BUFFER_SIZE];
- char *pstr = buffer;
-
- if ((superTblInfo)
- && (AUTO_CREATE_SUBTBL
- == superTblInfo->autoCreateTable)) {
- pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?",
- superTblInfo->sTblName);
- for (int tag = 0; tag < (superTblInfo->tagCount - 1);
- tag ++ ) {
- pstr += sprintf(pstr, ",?");
- }
- pstr += sprintf(pstr, ") VALUES(?");
+ if (stbInfo) {
+#if STMT_BIND_PARAM_BATCH == 1
+ parseStbSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+#else
+ parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec);
+#endif
} else {
- pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
- }
-
- for (int col = 0; col < columnCount; col ++) {
- pstr += sprintf(pstr, ",?");
- }
- pstr += sprintf(pstr, ")");
-
- debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer);
- int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0);
- if (ret != 0){
- errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
- ret, taos_stmt_errstr(pThreadInfo->stmt));
- free(pids);
- free(infos);
- exit(-1);
+#if STMT_BIND_PARAM_BATCH == 1
+ parseNtbSampleToStmtBatchForThread(
+ pThreadInfo, timePrec, batch);
+#else
+ parseNtbSampleToStmt(pThreadInfo, timePrec);
+#endif
}
}
-#endif
} else {
pThreadInfo->taos = NULL;
}
- /* if ((NULL == superTblInfo)
- || (0 == superTblInfo->multiThreadWriteOneTbl)) {
+ /* if ((NULL == stbInfo)
+ || (0 == stbInfo->multiThreadWriteOneTbl)) {
*/
pThreadInfo->start_table_from = tableFrom;
            pThreadInfo->ntables = i < b ? a + 1 : a;
            pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
            tableFrom = pThreadInfo->end_table_to + 1;
/* } else {
pThreadInfo->start_table_from = 0;
- pThreadInfo->ntables = superTblInfo->childTblCount;
+ pThreadInfo->ntables = stbInfo->childTblCount;
pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
@@ -7189,6 +9823,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
+ free(stmtBuffer);
+
+ int64_t start = taosGetTimestampUs();
+
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
}
@@ -7203,22 +9841,44 @@ static void startMultiThreadInsertData(int threads, char* db_name,
threadInfo *pThreadInfo = infos + i;
tsem_destroy(&(pThreadInfo->lock_sem));
+ taos_close(pThreadInfo->taos);
-#if STMT_IFACE_ENABLED == 1
if (pThreadInfo->stmt) {
taos_stmt_close(pThreadInfo->stmt);
}
+
+ tmfree((char *)pThreadInfo->bind_ts);
+#if STMT_BIND_PARAM_BATCH == 1
+ tmfree((char *)pThreadInfo->bind_ts_array);
+ tmfree(pThreadInfo->bindParams);
+ tmfree(pThreadInfo->is_null);
+#else
+ if (pThreadInfo->sampleBindArray) {
+ for (int k = 0; k < MAX_SAMPLES; k++) {
+ uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)(
+ pThreadInfo->sampleBindArray
+ + sizeof(uintptr_t *) * k));
+ int columnCount = (pThreadInfo->stbInfo)?
+ pThreadInfo->stbInfo->columnCount:
+ g_args.columnCount;
+ for (int c = 1; c < columnCount + 1; c++) {
+ TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c));
+ if (bind)
+ tmfree(bind->buffer);
+ }
+ tmfree((char *)tmp);
+ }
+ tmfree(pThreadInfo->sampleBindArray);
+ }
#endif
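
For readers of the #else teardown: sampleBindArray is a two-level structure, and the loop frees it leaf-first. The shape it assumes (inferred from the pointer arithmetic above):

/* sampleBindArray --> [ row0*, row1*, ... ]  (MAX_SAMPLES uintptr_t slots)
 * rowK* -----------> [ TAOS_BIND ts, TAOS_BIND col1, ..., TAOS_BIND colN ]
 *
 * Each column TAOS_BIND owns its buffer, so teardown frees
 * bind->buffer for slots 1..columnCount, then the row block,
 * then the top-level array. */
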
- tsem_destroy(&(pThreadInfo->lock_sem));
- taos_close(pThreadInfo->taos);
debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->threadID, pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows);
- if (superTblInfo) {
- superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
- superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows;
+ if (stbInfo) {
+ stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
+ stbInfo->totalInsertRows += pThreadInfo->totalInsertRows;
} else {
g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
g_args.totalInsertRows += pThreadInfo->totalInsertRows;
@@ -7229,48 +9889,44 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
}
- cntDelay -= 1;
if (cntDelay == 0) cntDelay = 1;
avgDelay = (double)totalDelay / cntDelay;
- int64_t end = taosGetTimestampMs();
+ int64_t end = taosGetTimestampUs();
int64_t t = end - start;
+ if (0 == t) t = 1;
- double tInMs = t/1000.0;
+ double tInMs = (double) t / 1000000.0;
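
Timing is now measured in microseconds from after thread creation, and despite its historical name tInMs now holds seconds. The conversion with the zero-elapsed guard, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* Elapsed microseconds -> records/second, with the same 1 us floor
 * used above to avoid dividing by zero. */
static double recordsPerSecond(int64_t elapsedUs, uint64_t totalRows) {
    if (0 == elapsedUs) elapsedUs = 1;
    return (double)totalRows / ((double)elapsedUs / 1000000.0);
}

int main(void) {
    printf("%.2f rec/s\n", recordsPerSecond(2500000, 100000)); /* 40000.00 */
    return 0;
}
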
- if (superTblInfo) {
- fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
- tInMs, superTblInfo->totalInsertRows,
- superTblInfo->totalAffectedRows,
- threads, db_name, superTblInfo->sTblName,
- (tInMs)?
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX);
+ if (stbInfo) {
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ tInMs, stbInfo->totalInsertRows,
+ stbInfo->totalAffectedRows,
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
- tInMs, superTblInfo->totalInsertRows,
- superTblInfo->totalAffectedRows,
- threads, db_name, superTblInfo->sTblName,
- (tInMs)?
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX);
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ tInMs, stbInfo->totalInsertRows,
+ stbInfo->totalAffectedRows,
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
}
} else {
- fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
tInMs, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
- (tInMs)?
- (double)(g_args.totalInsertRows/tInMs):FLT_MAX);
+ (double)(g_args.totalInsertRows/tInMs));
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
tInMs, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
- (tInMs)?
- (double)(g_args.totalInsertRows/tInMs):FLT_MAX);
+ (double)(g_args.totalInsertRows/tInMs));
}
}
@@ -7296,25 +9952,28 @@ static void *readTable(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
setThreadName("readTable");
- char command[BUFFER_SIZE] = "\0";
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
+
uint64_t sTime = pThreadInfo->start_time;
char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
- errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ free(command);
return NULL;
}
- int64_t num_of_DPT;
- /* if (pThreadInfo->superTblInfo) {
- num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
+ int64_t insertRows;
+ /* if (pThreadInfo->stbInfo) {
+ insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table;
} else {
*/
- num_of_DPT = g_args.num_of_DPT;
+ insertRows = g_args.insertRows;
// }
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
@@ -7327,8 +9986,8 @@ static void *readTable(void *sarg) {
for (int j = 0; j < n; j++) {
double totalT = 0;
uint64_t count = 0;
- for (int64_t i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
+ for (int64_t i = 0; i < ntables; i++) {
+ sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64,
g_aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
@@ -7336,10 +9995,11 @@ static void *readTable(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
+ free(command);
return NULL;
}
@@ -7355,11 +10015,12 @@ static void *readTable(void *sarg) {
fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData,
- (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
+ (double)(ntables * insertRows) / totalT, totalT * 1000);
printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000);
}
fprintf(fp, "\n");
fclose(fp);
+ free(command);
#endif
return NULL;
}
@@ -7369,16 +10030,19 @@ static void *readMetric(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
setThreadName("readMetric");
- char command[BUFFER_SIZE] = "\0";
+ char *command = calloc(1, BUFFER_SIZE);
+ assert(command);
+
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ free(command);
return NULL;
}
- int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
+ int64_t insertRows = pThreadInfo->stbInfo->insertRows;
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
@@ -7392,7 +10056,7 @@ static void *readMetric(void *sarg) {
char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
- int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
+ int64_t m = 10 < ntables ? 10 : ntables;
for (int64_t i = 1; i <= m; i++) {
if (i == 1) {
@@ -7402,7 +10066,7 @@ static void *readMetric(void *sarg) {
}
strncat(condition, tempS, COND_BUF_LEN - 1);
- sprintf(command, "select %s from meters where %s", g_aggreFunc[j], condition);
+ sprintf(command, "SELECT %s FROM meters WHERE %s", g_aggreFunc[j], condition);
printf("Where condition: %s\n", condition);
fprintf(fp, "%s\n", command);
@@ -7413,10 +10077,11 @@ static void *readMetric(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
+ free(command);
return NULL;
}
int count = 0;
@@ -7426,7 +10091,7 @@ static void *readMetric(void *sarg) {
t = taosGetTimestampMs() - t;
fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
- num_of_tables * num_of_DPT / (t * 1000.0), t);
+ ntables * insertRows / (t * 1000.0), t);
printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0);
taos_free_result(pSql);
@@ -7434,6 +10099,7 @@ static void *readMetric(void *sarg) {
fprintf(fp, "\n");
}
fclose(fp);
+ free(command);
#endif
return NULL;
}
@@ -7458,7 +10124,7 @@ static int insertTestProcess() {
debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile);
g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
if (NULL == g_fpOfInsertResult) {
- errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile);
+ errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile);
return -1;
}
@@ -7470,13 +10136,18 @@ static int insertTestProcess() {
init_rand_data();
// create database and super tables
- if(createDatabasesAndStables() != 0) {
+ char *cmdBuffer = calloc(1, BUFFER_SIZE);
+ assert(cmdBuffer);
+
+ if(createDatabasesAndStables(cmdBuffer) != 0) {
if (g_fpOfInsertResult)
fclose(g_fpOfInsertResult);
+ free(cmdBuffer);
return -1;
}
+ free(cmdBuffer);
- // pretreatement
+ // pretreatment
if (prepareSampleData() != 0) {
if (g_fpOfInsertResult)
fclose(g_fpOfInsertResult);
@@ -7486,18 +10157,30 @@ static int insertTestProcess() {
double start;
double end;
- // create child tables
- start = taosGetTimestampMs();
- createChildTables();
- end = taosGetTimestampMs();
-
if (g_totalChildTables > 0) {
- fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ fprintf(stderr,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ }
+
+ // create child tables
+ start = taosGetTimestampMs();
+ createChildTables();
+ end = taosGetTimestampMs();
+
+ fprintf(stderr,
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
}
}
@@ -7508,14 +10191,14 @@ static int insertTestProcess() {
if (g_Dbs.db[i].superTblCount > 0) {
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j];
+ SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j];
- if (superTblInfo && (superTblInfo->insertRows > 0)) {
+ if (stbInfo && (stbInfo->insertRows > 0)) {
startMultiThreadInsertData(
g_Dbs.threadCount,
g_Dbs.db[i].dbName,
g_Dbs.db[i].dbCfg.precision,
- superTblInfo);
+ stbInfo);
}
}
}
@@ -7555,7 +10238,7 @@ static void *specifiedTableQuery(void *sarg) {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
@@ -7567,7 +10250,7 @@ static void *specifiedTableQuery(void *sarg) {
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint("use database %s failed!\n\n",
g_queryInfo.dbName);
return NULL;
}
@@ -7608,7 +10291,7 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t currentPrintTime = taosGetTimestampMs();
uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n",
__func__, __LINE__, endTs, startTs);
printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
@@ -7643,7 +10326,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) {
}
static void *superTableQuery(void *sarg) {
- char sqlstr[BUFFER_SIZE];
+ char *sqlstr = calloc(1, BUFFER_SIZE);
+ assert(sqlstr);
+
threadInfo *pThreadInfo = (threadInfo *)sarg;
setThreadName("superTableQuery");
@@ -7658,6 +10343,7 @@ static void *superTableQuery(void *sarg) {
if (taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
+ free(sqlstr);
return NULL;
} else {
pThreadInfo->taos = taos;
@@ -7682,7 +10368,7 @@ static void *superTableQuery(void *sarg) {
st = taosGetTimestampMs();
for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
- memset(sqlstr,0,sizeof(sqlstr));
+ memset(sqlstr, 0, BUFFER_SIZE);
replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
if (g_queryInfo.superQueryInfo.result[j][0] != '\0') {
sprintf(pThreadInfo->filePath, "%s-%d",
@@ -7713,6 +10399,7 @@ static void *superTableQuery(void *sarg) {
(double)(et - st)/1000.0);
}
+ free(sqlstr);
return NULL;
}
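
The memset change above matters because sqlstr went from a stack array to a heap pointer: sizeof on a pointer is the pointer size (typically 8), not the buffer size, so the old call would have cleared only the first few bytes. An illustration (BUFFER_SIZE value assumed):

#include <stdlib.h>
#include <string.h>

#define BUFFER_SIZE 65536   /* assumed; the real value comes from taosdemo */

static void demo(void) {
    char stackBuf[BUFFER_SIZE];
    char *heapBuf = calloc(1, BUFFER_SIZE);

    memset(stackBuf, 0, sizeof(stackBuf)); /* ok: sizeof == BUFFER_SIZE    */
    memset(heapBuf, 0, sizeof(heapBuf));   /* bug: sizeof(char *), ~8 B    */
    memset(heapBuf, 0, BUFFER_SIZE);       /* correct, as in the fix above */

    free(heapBuf);
}
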
@@ -7729,15 +10416,15 @@ static int queryTestProcess() {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
- exit(-1);
+ exit(EXIT_FAILURE);
}
if (0 != g_queryInfo.superQueryInfo.sqlCount) {
getAllChildNameOfSuperTable(taos,
g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
+ g_queryInfo.superQueryInfo.stbName,
&g_queryInfo.superQueryInfo.childTblName,
&g_queryInfo.superQueryInfo.childTblCount);
}
@@ -7751,7 +10438,7 @@ static int queryTestProcess() {
if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
if (convertHostToServAddr(
g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0)
- exit(-1);
+            ERROR_EXIT("failed to convert host to server address\n");
}
pthread_t *pids = NULL;
@@ -7787,13 +10474,13 @@ static int queryTestProcess() {
taos_close(taos);
free(infos);
free(pids);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
return -1;
}
}
- pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
+        pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedTableQuery,
pThreadInfo);
@@ -7843,7 +10530,7 @@ static int queryTestProcess() {
        pThreadInfo->ntables = i < b ? a + 1 : a;
        pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
}
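
The a/b arithmetic above (and in the insert and subscribe paths) splits tables across threads so that the first ntables % threads workers take one extra table. A worked sketch:

#include <inttypes.h>
#include <stdio.h>

/* Partition ntables across `threads` workers: a = ntables / threads,
 * b = ntables % threads, first b workers get a + 1 tables. */
int main(void) {
    int64_t ntables = 10;
    int threads = 3;
    int64_t a = ntables / threads;        /* 3 */
    int64_t b = ntables % threads;        /* 1 */

    uint64_t tableFrom = 0;
    for (int i = 0; i < threads; i++) {
        int64_t cnt = (i < b) ? a + 1 : a;
        printf("thread %d: tables [%" PRIu64 ", %" PRIu64 "]\n",
               i, tableFrom, tableFrom + cnt - 1);
        tableFrom += cnt;                 /* -> [0,3] [4,6] [7,9] */
    }
    return 0;
}
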
@@ -7870,7 +10557,7 @@ static int queryTestProcess() {
tmfree((char*)pidsOfSub);
tmfree((char*)infosOfSub);
- // taos_close(taos);// TODO: workaround to use separate taos connection;
+ // taos_close(taos);// workaround to use separate taos connection;
uint64_t endTs = taosGetTimestampMs();
uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
@@ -7885,27 +10572,27 @@ static int queryTestProcess() {
static void stable_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
if (param)
fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+    // taos_unsubscribe() will free result.
}
static void specified_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
if (param)
fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+    // taos_unsubscribe() will free result.
}
static TAOS_SUB* subscribeImpl(
@@ -7937,7 +10624,7 @@ static TAOS_SUB* subscribeImpl(
}
if (tsub == NULL) {
- errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
return NULL;
}
@@ -7946,16 +10633,19 @@ static TAOS_SUB* subscribeImpl(
static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
- char subSqlstr[BUFFER_SIZE];
+ char *subSqlStr = calloc(1, BUFFER_SIZE);
+ assert(subSqlStr);
+
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
setThreadName("superSub");
if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
+ free(subSqlStr);
errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
pThreadInfo->ntables, MAX_QUERY_SQL_COUNT);
- exit(-1);
+ exit(EXIT_FAILURE);
}
if (pThreadInfo->taos == NULL) {
@@ -7965,8 +10655,9 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
+ free(subSqlStr);
return NULL;
}
}
@@ -7975,8 +10666,9 @@ static void *superSubscribe(void *sarg) {
sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
+ free(subSqlStr);
return NULL;
}
@@ -7991,25 +10683,26 @@ static void *superSubscribe(void *sarg) {
pThreadInfo->end_table_to, i);
sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"",
i, pThreadInfo->querySeq);
- memset(subSqlstr, 0, sizeof(subSqlstr));
+ memset(subSqlStr, 0, BUFFER_SIZE);
replaceChildTblName(
g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
- subSqlstr, i);
+ subSqlStr, i);
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
}
- verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n",
- __func__, __LINE__, pThreadInfo->threadID, subSqlstr);
+ verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n",
+ __func__, __LINE__, pThreadInfo->threadID, subSqlStr);
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
- pThreadInfo, subSqlstr, topic,
+ pThreadInfo, subSqlStr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval);
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
+ free(subSqlStr);
return NULL;
}
}
@@ -8066,12 +10759,13 @@ static void *superSubscribe(void *sarg) {
consumed[tsubSeq]= 0;
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
- pThreadInfo, subSqlstr, topic,
+ pThreadInfo, subSqlStr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval
);
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
+ free(subSqlStr);
return NULL;
}
}
@@ -8091,6 +10785,7 @@ static void *superSubscribe(void *sarg) {
}
taos_close(pThreadInfo->taos);
+ free(subSqlStr);
return NULL;
}
@@ -8107,7 +10802,7 @@ static void *specifiedSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
}
@@ -8214,20 +10909,20 @@ static int subscribeTestProcess() {
g_queryInfo.dbName,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
- exit(-1);
+ exit(EXIT_FAILURE);
}
if (0 != g_queryInfo.superQueryInfo.sqlCount) {
getAllChildNameOfSuperTable(taos,
g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
+ g_queryInfo.superQueryInfo.stbName,
&g_queryInfo.superQueryInfo.childTblName,
&g_queryInfo.superQueryInfo.childTblCount);
}
- taos_close(taos); // TODO: workaround to use separate taos connection;
+ taos_close(taos); // workaround to use separate taos connection;
pthread_t *pids = NULL;
threadInfo *infos = NULL;
@@ -8237,15 +10932,15 @@ static int subscribeTestProcess() {
//==== create threads for query for specified table
if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ debugPrint("%s() LN%d, specified query sqlCount %d.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount);
} else {
if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
-            errorPrint("%s() LN%d, sepcified query sqlCount %d.\n",
-                     __func__, __LINE__,
-                     g_queryInfo.specifiedQueryInfo.sqlCount);
+            errorPrint2("%s() LN%d, invalid specified query concurrent %d.\n",
+                     __func__, __LINE__,
+                     g_queryInfo.specifiedQueryInfo.concurrent);
- exit(-1);
+ exit(EXIT_FAILURE);
}
pids = calloc(
@@ -8259,8 +10954,8 @@ static int subscribeTestProcess() {
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
- exit(-1);
+ errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
@@ -8269,7 +10964,7 @@ static int subscribeTestProcess() {
threadInfo *pThreadInfo = infos + seq;
pThreadInfo->threadID = seq;
pThreadInfo->querySeq = i;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
}
}
@@ -8294,10 +10989,10 @@ static int subscribeTestProcess() {
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n",
+ errorPrint2("%s() LN%d, malloc failed for create threads\n",
__func__, __LINE__);
// taos_close(taos);
- exit(-1);
+ exit(EXIT_FAILURE);
}
int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
@@ -8326,7 +11021,7 @@ static int subscribeTestProcess() {
            pThreadInfo->ntables = j < b ? a + 1 : a;
            pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1;
            tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pidsOfStable + seq,
NULL, superSubscribe, pThreadInfo);
}
@@ -8366,7 +11061,7 @@ static void initOfInsertMeta() {
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
g_Dbs.port = 6030;
tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);
g_Dbs.threadCount = 2;
g_Dbs.use_metric = g_args.use_metric;
@@ -8379,7 +11074,7 @@ static void initOfQueryMeta() {
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
g_queryInfo.port = 6030;
tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);
}
static void setParaFromArg() {
@@ -8393,16 +11088,14 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE);
}
- if (g_args.password) {
- tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE);
- }
+ tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN);
if (g_args.port) {
g_Dbs.port = g_args.port;
}
- g_Dbs.threadCount = g_args.num_of_threads;
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = true;
@@ -8419,22 +11112,23 @@ static void setParaFromArg() {
g_Dbs.do_aggreFunc = true;
char dataString[TSDB_MAX_BYTES_PER_ROW];
- char **data_type = g_args.datatype;
+ char *data_type = g_args.data_type;
+ char **dataType = g_args.dataType;
memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW);
- if (strcasecmp(data_type[0], "BINARY") == 0
- || strcasecmp(data_type[0], "BOOL") == 0
- || strcasecmp(data_type[0], "NCHAR") == 0 ) {
+ if ((data_type[0] == TSDB_DATA_TYPE_BINARY)
+ || (data_type[0] == TSDB_DATA_TYPE_BOOL)
+ || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) {
g_Dbs.do_aggreFunc = false;
}
if (g_args.use_metric) {
g_Dbs.db[0].superTblCount = 1;
- tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
- g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
- g_Dbs.threadCount = g_args.num_of_threads;
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN);
+ g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables;
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.asyncMode = g_args.async_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
@@ -8454,26 +11148,28 @@ static void setParaFromArg() {
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step;
- g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
+ g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows;
g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
g_Dbs.db[0].superTbls[0].columnCount = 0;
for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
- if (data_type[i] == NULL) {
+ if (data_type[i] == TSDB_DATA_TYPE_NULL) {
break;
}
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i];
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1));
- g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
+ dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1));
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth;
g_Dbs.db[0].superTbls[0].columnCount++;
}
- if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
- g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
+ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) {
+ g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount;
} else {
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
- i < g_args.num_of_CPR; i++) {
+ i < g_args.columnCount; i++) {
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT;
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
"INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
@@ -8487,10 +11183,10 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
"BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1));
- g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.db[0].superTbls[0].tagCount = 0;
}
}
@@ -8502,8 +11198,7 @@ static int regexMatch(const char *s, const char *reg, int cflags) {
/* Compile regular expression */
if (regcomp(®ex, reg, cflags) != 0) {
- printf("Fail to compile regex\n");
- exit(-1);
+        ERROR_EXIT("Failed to compile regex\n");
}
/* Execute regular expression */
@@ -8516,9 +11211,9 @@ static int regexMatch(const char *s, const char *reg, int cflags) {
return 0;
} else {
regerror(reti, ®ex, msgbuf, sizeof(msgbuf));
- printf("Regex match failed: %s\n", msgbuf);
regfree(®ex);
- exit(-1);
+ printf("Regex match failed: %s\n", msgbuf);
+ exit(EXIT_FAILURE);
}
return 0;
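
regexMatch() is a thin wrapper over POSIX regcomp/regexec; after the reorder above, regfree() runs before the message is printed and the process exits. A minimal usage sketch (the pattern is illustrative):

#include <regex.h>
#include <stdio.h>

/* Test s against an extended POSIX regex, as regexMatch() does. */
static int matches(const char *s, const char *reg) {
    regex_t regex;
    if (regcomp(&regex, reg, REG_EXTENDED) != 0)
        return -1;                       /* compile error */
    int reti = regexec(&regex, s, 0, NULL, 0);
    regfree(&regex);
    return (reti == 0) ? 1 : 0;          /* 1 = match, 0 = no match */
}

int main(void) {
    printf("%d\n", matches("10ms", "^[0-9]+[mun]?s$"));  /* 1 */
    return 0;
}
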
@@ -8563,7 +11258,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
memcpy(cmd + cmd_len, line, read_len);
if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
- errorPrint("%s() LN%d, queryDbExec %s failed!\n",
+ errorPrint2("%s() LN%d, queryDbExec %s failed!\n",
__func__, __LINE__, cmd);
tmfree(cmd);
tmfree(line);
@@ -8613,19 +11308,19 @@ static void queryResult() {
pthread_t read_id;
threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
assert(pThreadInfo);
- pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+ pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;
//pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
if (g_args.use_metric) {
pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
- pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+ pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(pThreadInfo->tb_prefix,
g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN);
} else {
- pThreadInfo->ntables = g_args.num_of_tables;
- pThreadInfo->end_table_to = g_args.num_of_tables -1;
+ pThreadInfo->ntables = g_args.ntables;
+ pThreadInfo->end_table_to = g_args.ntables -1;
tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
}
@@ -8636,10 +11331,10 @@ static void queryResult() {
g_Dbs.db[0].dbName,
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
- taos_errstr(NULL));
free(pThreadInfo);
- exit(-1);
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
+ taos_errstr(NULL));
+ exit(EXIT_FAILURE);
}
tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
@@ -8659,7 +11354,7 @@ static void testCmdLine() {
if (strlen(configDir)) {
wordexp_t full_path;
if (wordexp(configDir, &full_path, 0) != 0) {
- errorPrint( "Invalid path %s\n", configDir);
+ errorPrint("Invalid path %s\n", configDir);
return;
}
taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
@@ -8679,6 +11374,7 @@ int main(int argc, char *argv[]) {
debugPrint("meta file: %s\n", g_args.metaFile);
if (g_args.metaFile) {
+ g_totalChildTables = 0;
initOfInsertMeta();
initOfQueryMeta();
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 6f7a99b2df1e3d7bbca716afca4903576711f58f..fe7616fa174f5af707892cf3d251689a60111ed6 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -27,7 +27,7 @@
#include "tutil.h"
#include