diff --git a/.drone.yml b/.drone.yml
index 4b14bce0b8aa38dfc975b5d0f3253130d2025dca..085a07acf94a57cbcaf076c149cebdf243f8ff74 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -23,6 +23,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: test_arm64_bionic
@@ -150,6 +151,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_trusty
@@ -176,6 +178,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_xenial
@@ -201,7 +204,7 @@ steps:
branch:
- develop
- master
-
+ - 2.0
---
kind: pipeline
name: build_bionic
@@ -226,6 +229,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_centos7
@@ -249,4 +253,4 @@ steps:
branch:
- develop
- master
-
+ - 2.0
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 50f4251320abc80358b67eab22c02672d5f26bd6..2c37aa92f77dd14bd274be94568dfe904f48c5f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
build/
+.ycm_extra_conf.py
.vscode/
.idea/
cmake-build-debug/
diff --git a/.gitmodules b/.gitmodules
index a2266c46afd180b52d3aa19003380078894f6a4b..3d721fa8954023f92f8dcc70b09a1424d0104bbe 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,12 @@
[submodule "src/connector/go"]
path = src/connector/go
- url = git@github.com:taosdata/driver-go.git
+ url = https://github.com/taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
- url = git@github.com:taosdata/grafanaplugin.git
+ url = https://github.com/taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
- url = git@github.com:taosdata/hivemq-tdengine-extension.git
+ url = https://github.com/taosdata/hivemq-tdengine-extension.git
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
diff --git a/Jenkinsfile b/Jenkinsfile
index b073c32e1384dc7fa527695ab3be8dfde26be978..f076a046686fd62a07695cfe3911e1baacf5c5d5 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -224,6 +224,34 @@ pipeline {
steps {
timeout(time: 55, unit: 'MINUTES'){
pre_test()
+ sh '''
+ rm -rf /var/lib/taos/*
+ rm -rf /var/log/taos/*
+ nohup taosd >/dev/null &
+ sleep 10
+ '''
+ sh '''
+ cd ${WKC}/tests/examples/nodejs
+ npm install td2.0-connector > /dev/null 2>&1
+ node nodejsChecker.js host=localhost
+ node test1970.js
+ cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
+ npm install td2.0-connector > /dev/null 2>&1
+ node nanosecondTest.js
+
+ '''
+ sh '''
+ cd ${WKC}/tests/examples/C#/taosdemo
+ mcs -out:taosdemo *.cs > /dev/null 2>&1
+ echo '' |./taosdemo -c /etc/taos
+ cd ${WKC}/tests/connectorTest/C#Test/nanosupport
+ mcs -out:nano *.cs > /dev/null 2>&1
+ echo '' |./nano
+ '''
+ sh '''
+ cd ${WKC}/tests/gotest
+ bash batchtest.sh
+ '''
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
@@ -236,13 +264,11 @@ pipeline {
steps {
pre_test()
- catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- timeout(time: 60, unit: 'MINUTES'){
- sh '''
- cd ${WKC}/tests/pytest
- ./crash_gen.sh -a -p -t 4 -s 2000
- '''
- }
+ timeout(time: 60, unit: 'MINUTES'){
+ sh '''
+ cd ${WKC}/tests/pytest
+ ./crash_gen.sh -a -p -t 4 -s 2000
+ '''
}
timeout(time: 60, unit: 'MINUTES'){
sh '''
@@ -433,4 +459,4 @@ pipeline {
)
}
}
-}
\ No newline at end of file
+}
diff --git a/README-CN.md b/README-CN.md
index a9bc814e8d6f6bef0ad94e29588f62e2e4c0e7f1..d7192c939780a272acdebc94baf474aeaf0d7a38 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -107,6 +107,12 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
git submodule update --init --recursive
```
+如果使用 https 协议下载比较慢,可以通过修改 ~/.gitconfig 文件添加以下两行设置使用 ssh 协议下载。需要首先上传 ssh 密钥到 GitHub,详细方法请参考 GitHub 官方文档。
+```
+[url "git@github.com:"]
+ insteadOf = https://github.com/
+```
+
## 构建 TDengine
### Linux 系统
diff --git a/README.md b/README.md
index 2dea05f09d268b0d78de15ab98f3584df055c353..ab9e0348c8547c43bdbcb4df44a88c53429971e3 100644
--- a/README.md
+++ b/README.md
@@ -101,6 +101,14 @@ so you should run this command in the TDengine directory to install them:
git submodule update --init --recursive
```
+You can modify the file ~/.gitconfig to use the ssh protocol instead of https for faster downloads. You will need to upload your ssh public key to GitHub first; please refer to the official GitHub documentation for details.
+```
+[url "git@github.com:"]
+ insteadOf = https://github.com/
+```
+
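+You can also let git write the equivalent section into ~/.gitconfig for you by running `git config --global url."git@github.com:".insteadOf "https://github.com/"` once.
+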
## Build TDengine
### On Linux platform
diff --git a/cmake/define.inc b/cmake/define.inc
index 6c466fee026097b0bdeb89c7a4fc54fc382c2726..7894e6dab5d4ddd44e69f77702004183f431d3a6 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -45,6 +45,10 @@ IF (TD_TQ)
ADD_DEFINITIONS(-D_TD_TQ_)
ENDIF ()
+IF (TD_PRO)
+ ADD_DEFINITIONS(-D_TD_PRO_)
+ENDIF ()
+
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
@@ -133,8 +137,10 @@ IF (TD_LINUX)
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
+ MESSAGE(STATUS "memory sanitizer detected as true")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
+ MESSAGE(STATUS "memory sanitizer detected as false")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")
@@ -180,7 +186,7 @@ IF (TD_WINDOWS)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
- SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
+ SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd5999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
diff --git a/cmake/env.inc b/cmake/env.inc
index 2ceaecc2d9e486c249931ae45089e6a820e475b9..a173a19749860c51284e510ea6152ed90b639828 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -34,12 +34,22 @@ ENDIF ()
#
# Set compiler options
-SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
+IF (TD_LINUX)
+ SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
+ELSE ()
+ SET(COMMON_C_FLAGS "${COMMON_FLAGS} ")
+ENDIF ()
+
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
-SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
+IF (TD_WINDOWS)
+ SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
+ELSE ()
+ SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
+ENDIF ()
+
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
diff --git a/cmake/input.inc b/cmake/input.inc
index 9d716e1e7345955f7b6b844c85ace7e7bd5c6080..d746cf52f6eb016795d6fa6d01f408925159c710 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -49,6 +49,9 @@ IF (${DBNAME} MATCHES "power")
ELSEIF (${DBNAME} MATCHES "tq")
SET(TD_TQ TRUE)
MESSAGE(STATUS "tq is true")
+ELSEIF (${DBNAME} MATCHES "pro")
+ SET(TD_PRO TRUE)
+ MESSAGE(STATUS "pro is true")
ENDIF ()
IF (${DLLTYPE} MATCHES "go")
diff --git a/cmake/install.inc b/cmake/install.inc
index e9ad240a793b9736edbe5769c6af12276e13a1a6..7ea2ba8da0af79c15378cda956a330b357804c5a 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
diff --git a/cmake/version.inc b/cmake/version.inc
index ffceecf49215bcef9136c0b5d2f0f1e1290c9ed1..dfeb26454f9b6278132c3a92640a6aa8611456da 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.1.6.0")
+ SET(TD_VER_NUMBER "2.1.7.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
@@ -86,7 +86,7 @@ ENDIF ()
MESSAGE(STATUS "============= compile version parameter information start ============= ")
MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER})
MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE})
-MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT})
+MESSAGE(STATUS "community commit id:" ${TD_VER_GIT})
MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL})
MESSAGE(STATUS "build date:" ${TD_VER_DATE})
MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE})
diff --git a/deps/MsvcLibX/src/iconv.c b/deps/MsvcLibX/src/iconv.c
index 40b6e6462d9d0a6dc53509e8645cfba50b446256..1ec0dc73547852e37a23ff308cb740bbd88d872c 100644
--- a/deps/MsvcLibX/src/iconv.c
+++ b/deps/MsvcLibX/src/iconv.c
@@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes;
char *pBuf;
+ char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes);
if (!pBuf) {
@@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf);
return NULL;
}
- pBuf = realloc(pBuf, nBytes+1);
- return pBuf;
+ pBuf1 = realloc(pBuf, nBytes+1);
+ if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
+ return pBuf1;
}
int CountCharacters(const char *string, UINT cp) {
diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c
index f366b081ad688e15dc62dd0c8a7ccf9bb409afe0..85f4c83f24400e12c4a4b996b863df94e07cf819 100644
--- a/deps/MsvcLibX/src/main.c
+++ b/deps/MsvcLibX/src/main.c
@@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0;
char **ppszArg;
+ char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE;
ppszArg[argc++] = pszCopy+j;
- ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ if(ppszArg1 == NULL && ppszArg != NULL)
+ free(ppszArg);
+ ppszArg = ppszArg1;
if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0';
}
@@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0';
}
- realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
+ //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */
diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c
index 5fbcf773a24d0950ac8099cd9274fcbb3157a954..e2ba755f2d8b60e545e8ead71d198e60ca7a47c0 100644
--- a/deps/MsvcLibX/src/realpath.c
+++ b/deps/MsvcLibX/src/realpath.c
@@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
int iErr;
const char *pc;
@@ -242,8 +243,12 @@ realpath_failed:
return NULL;
}
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
+ if (!outbuf) {
+   pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+   if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
+   return pOutbuf1;
+ }
return pOutbuf;
}
#endif
@@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
char *pPath1 = NULL;
char *pPath2 = NULL;
int iErr;
@@ -590,10 +595,14 @@ realpathU_failed:
}
DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
+ if (!outbuf) {
+   pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+   if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
+   pOutbuf = pOutbuf1;
+ }
free(pPath1);
free(pPath2);
return pOutbuf;
}
#endif /* defined(_WIN32) */
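The three MsvcLibX patches above apply the same leak-avoiding `realloc` idiom: capture the result in a second pointer and free the original block if the call fails, instead of overwriting the only reference to it. A minimal standalone sketch of that idiom (the helper name `xrealloc_or_free` is illustrative, not part of MsvcLibX):

```c
#include <stdlib.h>

/* Resize *pp to size bytes without ever leaking the old block.
 * On success, *pp points at the resized block and 1 is returned.
 * On failure, the old block is freed, *pp is set to NULL, and 0
 * is returned -- mirroring the fixes in iconv.c, main.c and realpath.c. */
static int xrealloc_or_free(void **pp, size_t size) {
  void *p1 = realloc(*pp, size);
  if (p1 == NULL && size != 0) {
    free(*pp);   /* realloc leaves the old block alive on failure */
    *pp = NULL;
    return 0;
  }
  *pp = p1;
  return 1;
}
```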
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index 18bdc15d30430516c3ae6c847fc448477003dd66..df5a82517183f967aaaeb6767804cefa795301a1 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -40,17 +40,19 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [超级表管理](/taos-sql#super-table):添加、删除、查看、修改超级表
* [标签管理](/taos-sql#tags):增加、删除、修改标签
* [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入
-* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等
+* [数据查询](/taos-sql#select):支持时间段、值过滤、排序、嵌套查询、UNION、JOIN、查询结果手动分页等
* [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等
* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理
* [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件
+* [UDF](/taos-sql/udf):用户定义函数的创建和管理方法
* [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码
## [高效写入数据](/insert)
-* [SQL写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录
-* [Prometheus写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入
-* [Telegraf写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入
+* [SQL 写入](/insert#sql):使用SQL insert命令向一张或多张表写入单条或多条记录
+* [Schemaless 写入](/insert#schemaless):免于预先建表,将数据直接写入时自动维护元数据结构
+* [Prometheus 写入](/insert#prometheus):配置Prometheus, 不用任何代码,将数据直接写入
+* [Telegraf 写入](/insert#telegraf):配置Telegraf, 不用任何代码,将采集数据直接写入
* [EMQ X Broker](/insert#emq):配置EMQ X,不用任何代码,就可将MQTT数据直接写入
* [HiveMQ Broker](/insert#hivemq):配置HiveMQ,不用任何代码,就可将MQTT数据直接写入
diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md
index 7f70ccec5681ffd751cd1372d9c0926bf3f3beda..050046645c24e7db58ef2f39683433c3a4b53169 100644
--- a/documentation20/cn/01.evaluation/docs.md
+++ b/documentation20/cn/01.evaluation/docs.md
@@ -2,61 +2,64 @@
## TDengine 简介
-TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
+TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,TDengine 在时序空间大数据处理上,有着自己独到的优势。
-TDengine的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与Hadoop等典型的大数据平台相比,它具有如下鲜明的特点:
+TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网和工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,TDengine 具有如下鲜明的特点:
-* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少2万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
-* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。
-* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
-* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, MATLAB随时进行。
-* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
-* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
+* __10 倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
+* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储占用不到通用数据库的 1/10。
+* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。
+* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。
+* __高可用性和水平扩展__:通过分布式架构、一致性算法以及多副本与集群特性,TDengine 确保了高可用性和水平扩展性,以支持关键任务应用程序。
+* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准 SQL,支持 RESTful,支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。
+* __核心开源__:除了一些辅助功能外,TDengine 的核心是开源的。企业再也不用担心被数据库绑定,这使生态更加强大,产品更加稳定,开发者社区更加活跃。
-采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
+采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。

图 1. TDengine技术生态图
-
## TDengine 总体适用场景
-作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。
-
+作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。
### 数据源特点和需求
-从数据源角度,设计人员可以从下面几个角度分析TDengine在目标应用系统里面的适用性。
+
+从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
|数据源特点和需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|总体数据量巨大| | | √ |TDengine在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
-|数据输入速度偶尔或者持续巨大| | | √ | TDengine的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
-|数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
+|总体数据量巨大| | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
+|数据输入速度偶尔或者持续巨大| | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
+|数据源数目巨大| | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
### 系统架构要求
+
|系统架构要求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
-|要求容错和高可靠| | | √ |TDengine的集群功能,自动提供容错灾备等高可靠功能。|
-|标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能,遵守标准化规范。|
+|要求简单可靠的系统架构| | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
+|要求容错和高可靠| | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。|
+|标准化规范| | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。|
### 系统功能需求
+
|系统功能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
-|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑TDengine和关系型数据系统配合实现系统功能。|
+|要求完整的内置数据处理算法| | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
+|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。|
### 系统性能需求
+
|系统性能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
-|要求高速处理数据 | | | √ |TDengine的专门为IOT优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
-|要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。|
+|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
+|要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品数倍的处理速度提升。|
+|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
### 系统维护需求
+
|系统维护需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
-|要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
+|要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
|要求运维学习成本可控| | | √ |同上。|
-|要求市场有大量人才储备| √ | | |TDengine作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
-
+|要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md
index 30803d977704606b042c589b96b649d99a850106..d262589a6fa757179a267aa55066b3a6c255df27 100644
--- a/documentation20/cn/02.getting-started/01.docker/docs.md
+++ b/documentation20/cn/02.getting-started/01.docker/docs.md
@@ -1,6 +1,6 @@
# 通过 Docker 快速体验 TDengine
-虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。
+虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从 2.0.14.0 版本开始,TDengine 提供的镜像已经可以同时支持 X86-64、X86、arm64、arm32 平台,像 NAS、树莓派、嵌入式开发板之类可以运行 Docker 的非主流计算机也可以基于本文档轻松体验 TDengine。
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
@@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c
```bash
$ docker -v
-Docker version 20.10.5, build 55c4c88
+Docker version 20.10.3, build 48d30b5
```
## 在 Docker 容器中运行 TDengine
@@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88
1,使用命令拉取 TDengine 镜像,并使它在后台运行。
```bash
-$ docker run -d tdengine/tdengine
-cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
+$ docker run -d --name tdengine tdengine/tdengine
+7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
```
-- **docker run**:通过 Docker 运行一个容器。
-- **-d**:让容器在后台运行。
-- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
-- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。
+- **docker run**:通过 Docker 运行一个容器
+- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器
+- **-d**:让容器在后台运行
+- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像
+- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器
2,确认容器是否已经正确运行。
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
-cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
+c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**:列出所有正在运行状态的容器信息。
@@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
3,进入 Docker 容器内,使用 TDengine。
```bash
-$ docker exec -it cdf548465318 /bin/bash
-root@cdf548465318:~/TDengine-server-2.0.13.0#
+$ docker exec -it tdengine /bin/bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
- **-i**:进入交互模式。
- **-t**:指定一个终端。
-- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
+- **c452519b0f9b**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
- **/bin/bash**:载入容器后运行 bash 来进行交互。
4,进入容器后,执行 taos shell 客户端程序。
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
-Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-taos>
+taos>
```
TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
@@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
```bash
$ taos> q
-root@cdf548465318:~/TDengine-server-2.0.13.0#
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
2,在命令行界面执行 taosdemo。
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
-###################################################################
-# Server IP: localhost:0
-# User: root
-# Password: taosdata
-# Use metric: true
-# Datatype of Columns: int int int int int int int float
-# Binary Length(If applicable): -1
-# Number of Columns per record: 3
-# Number of Threads: 10
-# Number of Tables: 10000
-# Number of Data per Table: 100000
-# Records/Request: 1000
-# Database name: test
-# Table prefix: t
-# Delete method: 0
-# Test time: 2021-04-13 02:05:20
-###################################################################
+root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
+
+taosdemo is simulating data generated by power equipments monitoring...
+
+host: 127.0.0.1:6030
+user: root
+password: taosdata
+configDir:
+resultFile: ./output.txt
+thread num of insert data: 10
+thread num of create table: 10
+top insert interval: 0
+number of records per req: 30000
+max sql length: 1048576
+database count: 1
+database[0]:
+ database[0] name: test
+ drop: yes
+ replica: 1
+ precision: ms
+ super table count: 1
+ super table[0]:
+ stbName: meters
+ autoCreateTable: no
+ childTblExists: no
+ childTblCount: 10000
+ childTblPrefix: d
+ dataSource: rand
+ iface: taosc
+ insertRows: 10000
+ interlaceRows: 0
+ disorderRange: 1000
+ disorderRatio: 0
+ maxSqlLen: 1048576
+ timeStampStep: 1
+ startTimestamp: 2017-07-14 10:40:00.000
+ sampleFormat:
+ sampleFile:
+ tagsFile:
+ columnCount: 3
+column[0]:FLOAT column[1]:INT column[2]:FLOAT
+ tagCount: 2
+ tag[0]:INT tag[1]:BINARY(16)
+
+ Press enter key to continue or Ctrl-C to stop
```
-回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。
+回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+
+执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
3,进入 TDengine 终端,查看 taosdemo 生成的数据。
- **进入命令行。**
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
-Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-taos>
+taos>
```
- **查看数据库。**
@@ -124,8 +154,8 @@ taos>
```bash
$ taos> show databases;
name | created_time | ntables | vgroups | ···
- test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
- log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
+ test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
+ log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
```
@@ -136,10 +166,10 @@ $ taos> use test;
Database changed.
$ taos> show stables;
- name | created_time | columns | tags | tables |
-=====================================================================================
- meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
-Query OK, 1 row(s) in set (0.001737s)
+ name | created_time | columns | tags | tables |
+============================================================================================
+ meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
+Query OK, 1 row(s) in set (0.003259s)
```
@@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s)
```bash
$ taos> select * from test.t0 limit 10;
- ts | f1 | f2 | f3 |
-====================================================================
- 2017-07-14 02:40:01.000 | 3 | 9 | 0 |
- 2017-07-14 02:40:02.000 | 0 | 1 | 2 |
- 2017-07-14 02:40:03.000 | 7 | 2 | 3 |
- 2017-07-14 02:40:04.000 | 9 | 4 | 5 |
- 2017-07-14 02:40:05.000 | 1 | 2 | 5 |
- 2017-07-14 02:40:06.000 | 6 | 3 | 2 |
- 2017-07-14 02:40:07.000 | 4 | 7 | 8 |
- 2017-07-14 02:40:08.000 | 4 | 6 | 6 |
- 2017-07-14 02:40:09.000 | 5 | 7 | 7 |
- 2017-07-14 02:40:10.000 | 1 | 5 | 0 |
-Query OK, 10 row(s) in set (0.003638s)
+
+DB error: Table does not exist (0.002857s)
+taos> select * from test.d0 limit 10;
+ ts | current | voltage | phase |
+======================================================================================
+ 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
+ 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
+ 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
+ 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
+ 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
+ 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
+Query OK, 10 row(s) in set (0.016791s)
```
-- **查看 t0 表的标签值。**
+- **查看 d0 表的标签值。**
```bash
-$ taos> select areaid, loc from test.t0;
- areaid | loc |
-===========================
- 10 | shanghai |
-Query OK, 1 row(s) in set (0.002904s)
+$ taos> select groupid, location from test.d0;
+ groupid | location |
+=================================
+ 0 | shanghai |
+Query OK, 1 row(s) in set (0.003490s)
```
## 停止正在 Docker 中运行的 TDengine 服务
```bash
-$ docker stop cdf548465318
-cdf548465318
+$ docker stop tdengine
+tdengine
```
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
-- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。
+- **tdengine**:容器名称。
## 编程开发时连接在 Docker 中的 TDengine
@@ -191,11 +224,11 @@ cdf548465318
1,通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。
```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
-{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
@@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
```bash
-$ docker exec -it 526aa188da /bin/bash
+$ docker exec -it tdengine /bin/bash
```
-
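上面的 RESTful 示例使用 curl 访问映射到宿主机的 6041 端口。如果想在程序里做同样的事情,下面是一个基于 libcurl 的 C 语言示意程序,等价于文中的 curl 命令(仅为示意,假设已按上文映射 6041 端口并安装了 libcurl):

```c
#include <stdio.h>
#include <curl/curl.h>

/* 等价于:curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql */
int main(void) {
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");      /* 缺省账号 */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases");  /* SQL 语句作为请求体 */

  CURLcode rc = curl_easy_perform(curl);  /* 响应 JSON 缺省直接打印到 stdout */
  if (rc != CURLE_OK)
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

  curl_easy_cleanup(curl);
  return rc == CURLE_OK ? 0 : 1;
}
```

编译方式形如 `gcc rest_demo.c -lcurl`(文件名仅为示例)。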
diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index 4ae4ebf4d0f71ad9bce3588cbcce5e5750fb9728..a37afa9212911f4e48efe5e923607f3f2e05422a 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -22,7 +22,8 @@ TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟
具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。
-## 轻松启动
+
+## 轻松启动
安装成功后,用户可使用 `systemctl` 命令来启动 TDengine 的服务进程。
@@ -30,7 +31,7 @@ TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟
$ systemctl start taosd
```
-检查服务是否正常工作。
+检查服务是否正常工作:
```bash
$ systemctl status taosd
```
@@ -40,20 +41,20 @@ $ systemctl status taosd
**注意:**
- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
-- 为更好的获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting, 将其设为 0,就可将其关闭。
+- 为更好的获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting,将其设为 0,就可将其关闭。
- TDengine 采用 FQDN (一般就是 hostname )作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 hostname,在客户端应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
-* TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
+* TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 Linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
```bash
$ which systemctl
```
- 如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
+ 如果系统中不支持 `systemd`,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
-
-## TDengine 命令行程序
+
+## TDengine 命令行程序
执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。
@@ -83,14 +84,14 @@ select * from t;
Query OK, 2 row(s) in set (0.003128s)
```
-除执行 SQL 语句外,系统管理员还可以从 TDengine 终端检查系统运行状态,添加删除用户账号等。
+除执行 SQL 语句外,系统管理员还可以从 TDengine 终端进行检查系统运行状态、添加删除用户账号等操作。
-### 命令行参数
+**命令行参数**
您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数:
-- -c, --config-dir: 指定配置文件目录,默认为 _/etc/taos_
-- -h, --host: 指定服务的 FQDN 地址(也可以使用 IP),默认为连接本地服务
+- -c, --config-dir: 指定配置文件目录,默认为 `/etc/taos`
+- -h, --host: 指定服务的 FQDN 地址或 IP 地址,默认为连接本地服务
- -s, --commands: 在不进入终端的情况下运行 TDengine 命令
- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root
- -p, --password: 连接TDengine服务器的密码,缺省为 taosdata
@@ -99,24 +100,25 @@ Query OK, 2 row(s) in set (0.003128s)
示例:
```bash
-$ taos -h 192.168.0.1 -s "use db; show tables;"
+$ taos -h h1.taos.com -s "use db; show tables;"
```
-### 运行 SQL 命令脚本
+**运行 SQL 命令脚本**
-TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本.
+TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本。
```mysql
taos> source <filename>;
```
-### Shell 小技巧
+**Shell 小技巧**
- 可以使用上下光标键查看历史输入的指令
-- 修改用户密码,在 shell 中使用 alter user 指令
+- 修改用户密码:在 shell 中使用 `alter user` 命令,缺省密码为 taosdata
- ctrl+c 中止正在进行中的查询
-- 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema
-
+- 执行 `RESET QUERY CACHE` 可清除本地缓存的表 schema
+- 批量执行 SQL 语句。可以将一系列的 SQL 语句(以英文分号 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 shell 里执行命令 `source <filename>` 自动执行该文件里所有的 SQL 语句
+- 输入 q 回车,退出 taos shell
## TDengine 极速体验
@@ -164,14 +166,12 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
-
## 客户端和报警模块
如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
-
## 支持平台列表
### TDengine 服务器支持的平台列表
@@ -191,8 +191,6 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
-
-
### TDengine 客户端和连接器支持的平台列表
目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。
@@ -210,7 +208,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
-注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
+注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。
diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md
index b481bea9f840ad459812f955aa76a8a7829d5b37..3e9877b4465eac2ca05d99c88a620a0c6bf89689 100644
--- a/documentation20/cn/03.architecture/docs.md
+++ b/documentation20/cn/03.architecture/docs.md
@@ -6,14 +6,15 @@
在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格:
-
+
+
+
设备ID
时间戳
采集量
标签
-
Device ID
Time Stamp
@@ -98,11 +99,11 @@
2
-
+
表1:智能电表数据示例
-每一条记录都有设备ID,时间戳,采集的物理量(如上图中的电流、电压、相位),还有与每个设备相关的静态标签(如上述表一中的位置Location和分组groupId)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
+每一条记录都有设备ID,时间戳,采集的物理量(如上图中的电流、电压、相位),还有与每个设备相关的静态标签(如上述表1中的位置Location和分组groupId)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
### 数据特征
@@ -141,7 +142,7 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine引入超级表(Super Table,简称为STable)的概念。
-超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的结合),除需要定义采集量的表结构之外,还需要定义其标签的schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。 如果整个系统有N个不同类型的数据采集点,就需要建立N个超级表。
+超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有N个不同类型的数据采集点,就需要建立N个超级表。
在TDengine的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。当为某个具体数据采集点创建表时,用户使用超级表的定义做模板,同时指定该具体采集点(表)的标签值。与传统的关系型数据库相比,表(一个数据采集点)是带有静态标签的,而且这些标签可以事后增加、删除、修改。**一张超级表包含有多张表,这些表具有相同的时序数据schema,但带有不同的标签值**。
@@ -160,39 +161,53 @@ TDengine 分布式架构的逻辑结构图如下:
一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。
-**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
+**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
-**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
+**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP)决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
-**虚拟节点(vnode)**: 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB,但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP,以及所属的VGroup ID在系统内唯一标识,由管理节点创建并管理。
+**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB,但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP,以及所属的VGroup ID在系统内唯一标识,由管理节点创建并管理。
-**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode,它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个dnode上至多有一个mnode,由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。
+**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode,它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个dnode上至多有一个mnode,由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。
-**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N,系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定,缺省为1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的ID,VGroup ID。如果两个虚拟节点的vnode group ID相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID是永远不变的,即使一个虚拟节点组被删除,它的ID也不会被收回重复利用。
+**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N,系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定,缺省为1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的ID,VGroup ID。如果两个虚拟节点的vnode group ID相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID是永远不变的,即使一个虚拟节点组被删除,它的ID也不会被收回重复利用。
-**TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC, C/C++/C#/Python/Go/Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。
+**TAOSC:** taosc是TDengine给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供C/C++语言原生接口,内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC、C/C++、C#、Python、Go、Node.js接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的RESTful接口,taosc在TDengine集群的每个dnode上都有一运行实例。
### 节点之间的通讯
**通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。
-**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。另外,这个参数值的长度需要控制在 96 个字符以内。
+**FQDN配置:**一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数"fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。另外,这个参数值的长度需要控制在 96 个字符以内。
+
+**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。
+
+- 集群内数据节点之间的数据复制操作占用一个TCP端口,是serverPort+10。
+- 集群数据节点对外提供RESTful服务占用一个TCP端口,是serverPort+11。
+- 集群内数据节点与Arbitrator节点之间通讯占用一个TCP端口,是serverPort+12。
-**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
+因此一个数据节点总的端口范围为serverPort到serverPort+12,总共13个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
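+
+以缺省的 serverPort 6030 为例:对外服务端口为 6030,集群内部通讯端口为 6035,数据复制端口为 6040,RESTful 服务端口为 6041(与本文档 RESTful 示例中使用的端口一致),与 Arbitrator 通讯端口为 6042。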
-**集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。
+**集群对外连接:**TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。
-**集群内部通讯**: 各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步,1:检查mnodeEpSet文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;2:检查系统配置文件taos.cfg, 获取节点配置参数firstEp, secondEp,(这两个参数指定的节点可以是不带mnode的普通节点,这样的话,节点被连接时会尝试重定向到mnode节点)如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;3:将自己的EP设为mnode EP, 并独立运行起来。获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。
+**集群内部通讯:**各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步:
-**MNODE的选择:** TDengine逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的End Point, 并与获取的mnode EP List进行比对,如果在其中,该数据节点认为自己应该启动mnode模块,成为mnode。如果自己的EP不在mnode EP List里,则不启动mnode模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode有可能迁移至新的dnode,但一切都是透明的,无需人工干预,配置参数的修改,是mnode自己根据资源做出的决定。
+1. 检查mnodeEpSet.json文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;
+2. 检查系统配置文件taos.cfg,获取节点配置参数firstEp、secondEp(这两个参数指定的节点可以是不带mnode的普通节点,这样的话,节点被连接时会尝试重定向到mnode节点),如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;
+3. 将自己的EP设为mnode EP,并独立运行起来。
-**新数据节点的加入**:系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用TDengine CLI连接到现有工作的数据节点,然后用命令”create dnode"将新的数据节点的End Point添加进去; 第二步:在新的数据节点的系统配置参数文件taos.cfg里,将firstEp, secondEp参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。
+获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。
-**重定向**:无论是dnode还是taosc,最先都是要发起与mnode的连接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起连接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的连接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立连接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。
+**MNODE的选择:**TDengine逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的End Point, 并与获取的mnode EP List进行比对,如果在其中,该数据节点认为自己应该启动mnode模块,成为mnode。如果自己的EP不在mnode EP List里,则不启动mnode模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode有可能迁移至新的dnode,但一切都是透明的,无需人工干预,配置参数的修改,是mnode自己根据资源做出的决定。
+
+**新数据节点的加入:**系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤:第一步,使用TDengine CLI连接到现有工作的数据节点,然后用命令"create dnode"将新的数据节点的End Point添加进去;第二步,在新的数据节点的系统配置参数文件taos.cfg里,将firstEp、secondEp参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步地建立起来。
+
+**重定向:**无论是dnode还是taosc,最先都是要发起与mnode的连接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起连接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的连接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立连接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。
### 一个典型的消息流程
-为解释vnode, mnode, taosc和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。
+为解释vnode、mnode、taosc和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。

图 2 TDengine典型的操作流程
@@ -237,7 +250,7 @@ vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和
创建DB时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的vnode, 且该vnode是否有空余的表空间,如果有,立即在该有空位的vnode创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个dnode上创建一新的vnode, 然后创建表。如果DB有多个副本,系统不是只创建一个vnode,而是一个vgroup(虚拟数据节点组)。系统对vnode的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。
-每张表的meda data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。
+每张表的meta data(包含schema, 标签等)也存放于vnode里,而不是集中存放于mnode,实际上这是对Meta数据的分片,这样便于高效并行的进行标签过滤操作。
### 数据分区
@@ -249,64 +262,62 @@ TDengine除vnode分片之外,还对时序数据按照时间段进行分区。
每个dnode都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此mnode了解整个集群的状态。基于整体状态,当mnode发现某个dnode负载过重,它会将dnode上的一个或多个vnode挪到其他dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。
-如果mnode一段时间没有收到dnode的状态报告,mnode会认为这个dnode已经离线。如果离线时间超过一定时长(时长由配置参数offlineThreshold决定),该dnode将被mnode强制剔除出集群。该dnode上的vnodes如果副本数大于一,系统将自动在其他dnode上创建新的副本,以保证数据的副本数。如果该dnode上还有mnode, 而且mnode的副本数大于一,系统也将自动在其他dnode上创建新的mnode, 以保证mnode的副本数。
+如果mnode一段时间没有收到dnode的状态报告,mnode会认为这个dnode已经离线。如果离线时间超过一定时长(时长由配置参数offlineThreshold决定),该dnode将被mnode强制剔除出集群。该dnode上的vnodes如果副本数大于1,系统将自动在其他dnode上创建新的副本,以保证数据的副本数。如果该dnode上还有mnode, 而且mnode的副本数大于1,系统也将自动在其他dnode上创建新的mnode, 以保证mnode的副本数。
当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。
负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。
+
**提示:负载均衡由参数balance控制,决定开启/关闭自动负载均衡。**
## 数据写入与复制流程
-如果一个数据库有N个副本,那一个虚拟节点组就有N个虚拟节点,但是只有一个是Master,其他都是slave。当应用将新的记录写入系统时,只有Master vnode能接受写的请求。如果slave vnode收到写的请求,系统将通知taosc需要重新定向。
+如果一个数据库有N个副本,那一个虚拟节点组就有N个虚拟节点,但是只有一个是master,其他都是slave。当应用将新的记录写入系统时,只有master vnode能接受写的请求。如果slave vnode收到写的请求,系统将通知taosc需要重新定向。
-### Master vnode写入流程
+### Master Vnode写入流程
Master Vnode遵循下面的写入流程:

图 3 TDengine Master写入流程
-1. Master vnode收到应用的数据插入请求,验证OK,进入下一步;
+1. master vnode收到应用的数据插入请求,验证OK,进入下一步;
2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
-3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内slave vnodes, 该转发包带有数据的版本号(version);
+3. 如果有多个副本,vnode将把数据包转发给同一虚拟节点组内的slave vnodes, 该转发包带有数据的版本号(version);
4. 写入内存,并将记录加入到skip list;
-5. Master vnode返回确认信息给应用,表示写入成功。
+5. master vnode返回确认信息给应用,表示写入成功。
6. 如果第2,3,4步中任何一步失败,将直接返回错误给应用。
-### Slave vnode写入流程
+### Slave Vnode写入流程
-对于slave vnode, 写入流程是:
+对于slave vnode,写入流程是:

图 4 TDengine Slave写入流程
-1. Slave vnode收到Master vnode转发了的数据插入请求。
-2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
+1. slave vnode收到master vnode转发的数据插入请求。检查last version是否与master一致,如果一致,进入下一步;如果不一致,需要进入同步状态。
+2. 如果系统配置参数walLevel大于0,vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2,而且fsync设置为0,TDengine还将WAL数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。
3. 写入内存,更新内存中的skip list。
-与Master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。
+与master vnode相比,slave vnode不存在转发环节,也不存在回复确认环节,少了两步。但写内存与WAL是完全一样的。
### 异地容灾、IDC迁移
-从上述Master和Slave流程可以看出,TDengine采用的是异步复制的方式进行数据同步。这种方式能够大幅提高写入性能,网络延时对写入速度不会有大的影响。通过配置每个物理节点的IDC和机架号,可以保证对于一个虚拟节点组,虚拟节点由来自不同IDC、不同机架的物理节点组成,从而实现异地容灾。因此TDengine原生支持异地容灾,无需再使用其他工具。
+从上述master和slave流程可以看出,TDengine采用的是异步复制的方式进行数据同步。这种方式能够大幅提高写入性能,网络延时对写入速度不会有大的影响。通过配置每个物理节点的IDC和机架号,可以保证对于一个虚拟节点组,虚拟节点由来自不同IDC、不同机架的物理节点组成,从而实现异地容灾。因此TDengine原生支持异地容灾,无需再使用其他工具。
-另外一方面,TDengine支持动态修改副本数,一旦副本数增加,新加入的虚拟节点将立即进入数据同步流程,同步结束后,新加入的虚拟节点即可提供服务。而在同步过程中,master以及其他已经同步的虚拟节点都可以对外提供服务。利用这一特性,TDengine可以实现无服务中断的IDC机房迁移。只需要将新IDC的物理节点加入现有集群,等数据同步完成后,再将老的IDC的物理节点从集群中剔除即可。
+另一方面,TDengine支持动态修改副本数,一旦副本数增加,新加入的虚拟节点将立即进入数据同步流程,同步结束后,新加入的虚拟节点即可提供服务。而在同步过程中,master以及其他已经同步的虚拟节点都可以对外提供服务。利用这一特性,TDengine可以实现无服务中断的IDC机房迁移。只需要将新IDC的物理节点加入现有集群,等数据同步完成后,再将老的IDC的物理节点从集群中剔除即可。
但是,这种异步复制的方式,存在极小的时间窗口,丢失写入的数据。具体场景如下:
-1. Master vnode完成了它的5步操作,已经给APP确认写入成功,然后宕机;
-2. Slave vnode收到写入请求后,在第2步写入日志之前,处理失败
-3. Slave vnode将成为新的master, 从而丢失了一条记录
+1. master vnode完成了它的5步操作,已经给APP确认写入成功,然后宕机
+2. slave vnode收到写入请求后,在第2步写入日志之前,处理失败
+3. slave vnode将成为新的master,从而丢失了一条记录
-理论上,只要是异步复制,就无法保证100%不丢失。但是这个窗口极小,mater与slave要同时发生故障,而且发生在刚给应用确认写入成功之后。
-
-注:异地容灾、IDC无中断迁移,仅仅企业版支持。
-**提示:该功能暂未提供**
+理论上,只要是异步复制,就无法保证100%不丢失。但是这个窗口极小,master与slave要同时发生故障,而且发生在刚给应用确认写入成功之后。
### 主从选择
-Vnode会保持一个数据版本号(Version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增一。
+Vnode会保持一个数据版本号(version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增加1。
一个vnode启动时,角色(master、slave) 是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立TCP连接,并互相交换status,其中包括version和自己的角色。通过status的交换,系统进入选主流程,规则如下:
@@ -319,7 +330,9 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
### 同步复制
-对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此TDengine提供同步复制的机制供用户选择。在创建数据库时,除指定副本数replica之外,用户还需要指定新的参数quorum。如果quorum大于一,它表示每次Master转发给副本时,需要等待quorum-1个回复确认,才能通知应用,数据在slave已经写入成功。如果在一定的时间内,得不到quorum-1个回复确认,master vnode将返回错误给应用。
+对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此TDengine提供同步复制的机制供用户选择。在创建数据库时,除指定副本数replica之外,用户还需要指定新的参数quorum。如果quorum大于1,它表示每次master转发给副本时,需要等待quorum-1个回复确认,才能通知应用,数据在slave已经写入成功。如果在一定的时间内,得不到quorum-1个回复确认,master vnode将返回错误给应用。
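+
+例如,建库时执行 `CREATE DATABASE demo REPLICA 3 QUORUM 2;`(库名 demo 仅为示例),则 master 每次转发数据给两个 slave 后,需要等到其中至少 1 个回复确认,才会向应用返回写入成功。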
采用同步复制,系统的性能会有所下降,而且latency会增加。因为元数据要强一致,mnode之间的数据同步缺省就是采用的同步复制。
@@ -329,19 +340,19 @@ Vnode会保持一个数据版本号(Version),对内存数据进行持久化存
TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。
-TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。
+TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的key-value缓存系统再将之前缓存的数据重新加载到缓存中。
每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。
### 持久化存储
-TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制的增长。
+TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制地增长。
为充分利用时序数据特点,TDengine将一个vnode保存在持久化存储的数据切分成多个文件,每个文件只保存固定天数的数据,这个天数由系统配置参数days决定。切分成多个文件后,给定查询的起止日期,无需任何索引,就可以立即定位需要打开哪些数据文件,大大加快读取速度。
对于采集的数据,一般有保留时长,这个时长由系统配置参数keep决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。
-给定days与keep两个参数,一个典型工作状态的vnode中总的数据文件数为:`向上取整(keep/days)+1`个。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。 目前的版本,参数keep可以修改,但对于参数days,一但设置后,不可修改。
+给定days与keep两个参数,一个典型工作状态的vnode中总的数据文件数为:`向上取整(keep/days)+1`个。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。目前的版本,参数keep可以修改,但对于参数days,一旦设置后,不可修改。
在每个数据文件里,一张表的数据是一块一块存储的。一张表可以有一到多个数据文件块。在一个文件块里,数据是列式存储的,占用的是一片连续的存储空间,这样大大提高读取速度。文件块的大小由系统参数maxRows(每块最大记录条数)决定,缺省值为4096。这个值不宜过大,也不宜过小。过大,定位具体时间段的数据的搜索时间会变长,影响读取速度;过小,数据块的索引太大,压缩效率偏低,也影响读取速度。
@@ -351,32 +362,47 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久
### 多级存储
-在默认配置下,TDengine会将所有数据保存在/var/lib/taos目录下,而且每个vnode的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine可通过配置系统参数dataDir让多个挂载的硬盘被系统同时使用。除此之外,TDengine也提供了数据分级存储的功能,即根据数据文件的新老程度存储在不同的存储介质上。比如最新的数据存储在SSD上,超过一周的数据存储在本地硬盘上,超过4周的数据存储在网络存储设备上,这样来降低存储成本,而又保证高效的访问数据。数据在不同存储介质上的移动是由系统自动完成的,对应用是完全透明的。数据的分级存储也是通过系统参数dataDir来配置。
+说明:多级存储功能仅企业版支持,从 2.0.16.0 版本开始提供。
+
+在默认配置下,TDengine会将所有数据保存在/var/lib/taos目录下,而且每个vnode的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine可通过配置系统参数dataDir让多个挂载的硬盘被系统同时使用。
+
+除此之外,TDengine也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在SSD盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的HDD盘上。
+
+多级存储支持3级,每级最多可配置16个挂载点。
+
+TDengine多级存储配置方式如下(在配置文件/etc/taos/taos.cfg中):
-dataDir的配置格式如下:
```
-dataDir data_path [tier_level]
+dataDir [path] <level> <primary>
```
-其中data_path为挂载点的文件夹路径,tier_level为介质存储等级。介质存储等级越高,盛放数据文件越老。同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。TDengine最多支持3级存储,所以tier_level的取值为0、1和2。在配置dataDir时,必须存在且只有一个挂载路径不指定tier_level,称之为特殊挂载盘(路径)。该挂载路径默认为0级存储介质,且包含特殊文件链接,不可被移除,否则会对写入的数据产生毁灭性影响。
-假设一物理节点有六个可挂载的硬盘/mnt/disk1、/mnt/disk2、…、/mnt/disk6,其中disk1和disk2需要被指定为0级存储介质,disk3和disk4为1级存储介质, disk5和disk6为2级存储介质。disk1为特殊挂载盘,则可在/etc/taos/taos.cfg中做如下配置:
+- path: 挂载点的文件夹路径
+- level: 介质存储等级,取值为0,1,2。
+ 0级存储最新的数据,1级存储次新的数据,2级存储最老的数据,省略默认为0。
+ 各级存储之间的数据流向:0级存储 -> 1级存储 -> 2级存储。
+ 同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。
+ 需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。
+- primary: 是否为主挂载点,0(是)或1(否),省略默认为1。
+
+在配置中,只允许一个主挂载点的存在(level=0, primary=0),例如采用如下的配置方式:
```
-dataDir /mnt/disk1/taos
-dataDir /mnt/disk2/taos 0
-dataDir /mnt/disk3/taos 1
-dataDir /mnt/disk4/taos 1
-dataDir /mnt/disk5/taos 2
-dataDir /mnt/disk6/taos 2
+dataDir /mnt/data1 0 0
+dataDir /mnt/data2 0 1
+dataDir /mnt/data3 1 1
+dataDir /mnt/data4 1 1
+dataDir /mnt/data5 2 1
+dataDir /mnt/data6 2 1
```
-挂载的盘也可以是非本地的网络盘,只要系统能访问即可。
-
-注:多级存储功能仅企业版支持
+注意:
+1. 多级存储不允许跨级配置,合法的配置方案有:仅0级,仅0级+1级,以及0级+1级+2级。而不允许只配置level=0和level=2,而不配置level=1。
+2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。
+3. 多级存储目前不支持删除已经挂载的硬盘的功能。
## 数据查询
-TDengine提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine的查询处理需要客户端、vnode, mnode节点协同完成。
+TDengine提供了多种多样针对表和超级表的查询处理功能,除了常规的聚合查询之外,还提供针对时序数据的窗口查询、统计聚合等功能。TDengine的查询处理需要客户端、vnode、mnode节点协同完成。
### 单表查询
@@ -388,31 +414,33 @@ SQL语句的解析和校验工作在客户端完成。解析SQL语句并生成
### 按时间轴聚合、降采样、插值
-时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳数据在时间轴上进行聚合是不同于普通数据库的重要功能。从这点上来看,与流计算引擎的窗口查询有相似的地方。
+时序数据有别于普通数据的显著特征是每条记录均具有时间戳,因此针对具有时间戳的数据在时间轴上进行聚合是不同于普通数据库的重要功能。从这点上来看,与流计算引擎的窗口查询有相似的地方。
在TDengine中引入关键词interval来进行时间轴上固定长度时间窗口的切分,并按照时间窗口对数据进行聚合,对窗口范围内的数据按需进行聚合。例如:
-```mysql
-select count(*) from d1001 interval(1h);
+```sql
+SELECT COUNT(*) FROM d1001 INTERVAL(1h);
```
针对d1001设备采集的数据,按照1小时的时间窗口返回每小时存储的记录数量。
-在需要连续获得查询结果的应用场景下,如果给定的时间区间存在数据缺失,会导致该区间数据结果也丢失。TDengine提供策略针对时间轴聚合计算的结果进行插值,通过使用关键词Fill就能够对时间轴聚合结果进行插值。例如:
-```mysql
-select count(*) from d1001 interval(1h) fill(prev);
+在需要连续获得查询结果的应用场景下,如果给定的时间区间存在数据缺失,会导致该区间数据结果也丢失。TDengine提供策略针对时间轴聚合计算的结果进行插值,通过使用关键词fill就能够对时间轴聚合结果进行插值。例如:
+```sql
+SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 23:59:59' INTERVAL(1h) FILL(PREV);
```
针对d1001设备采集数据统计每小时记录数,如果某一个小时不存在数据,则返回之前一个小时的统计数据。TDengine提供前向插值(prev)、线性插值(linear)、NULL值填充(NULL)、特定值填充(value)。
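+
+例如,`FILL(VALUE, 0)` 表示对缺失的统计区间填充特定值 0(示例取值仅为说明)。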
### 多表聚合查询
-TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。 应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
+TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:

图 5 多表聚合查询原理图
1. 应用将一个查询条件发往系统;
-2. taosc将超级表的名字发往 Meta Node(管理节点);
+2. taosc将超级表的名字发往 meta node(管理节点);
3. 管理节点将超级表所拥有的 vnode 列表发回 taosc;
4. taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点;
5. 每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc;
diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md
index 4ea592bd4af180ba8aca0a34d1d1817cf4df03ca..586997373726c835c0fcdb6d80820b534f21d758 100644
--- a/documentation20/cn/04.model/docs.md
+++ b/documentation20/cn/04.model/docs.md
@@ -1,19 +1,19 @@
-# 数据建模
+# TDengine数据建模
-TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
+TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。
## 创建库
-不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如:
+不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如:
```mysql
-CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
+CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
```
-上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。
+上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为6,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。
创建库之后,需要使用SQL命令USE将当前库切换过来,例如:
@@ -21,16 +21,17 @@ CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
USE power;
```
-就当前连接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
+将当前连接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
**注意:**
- 任何一张表或超级表是属于一个库的,在创建表之前,必须先创建库。
- 处于两个不同库的表是不能进行JOIN操作的。
+- 创建并插入记录、查询历史记录的时候,均需要指定时间戳。
## 创建超级表
-一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表:
+一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一个超级表。以[表1](https://www.taosdata.com/cn/documentation/architecture#model_table1)中的智能电表为例,可以使用如下的SQL命令创建超级表:
```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
@@ -42,11 +43,11 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。
-一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。
+一张超级表最多容许 1024 列,如果一个采集点采集的物理量个数超过 1024,需要建多张超级表来处理。一个系统可以有多个 DB,一个 DB 里可以有一到多个超级表。(从 2.1.7.0 版本开始,列数限制由 1024 列放宽到了 4096 列。)
## 创建表
-TDengine对每个数据采集点需要独立建表。与标准的关系型数据一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以表一中的智能电表为例,可以使用如下的SQL命令建表:
+TDengine对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以[表1](https://www.taosdata.com/cn/documentation/architecture#model_table1)中的智能电表为例,可以使用如下的SQL命令建表:
```mysql
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
@@ -61,10 +62,10 @@ TDengine建议将数据采集点的全局唯一ID作为表名(比如设备序列
**自动建表**:在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。比如:
```mysql
-INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
```
-上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值“Beijing.Chaoyang", 2。
+上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建,则使用超级表meters做模板自动创建,同时打上标签值 `"Beijing.Chaoyang", 2`。
关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。
@@ -72,5 +73,5 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。
-TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。
+TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变得复杂,这时采用单列模型会显得更简单。
diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md
index ce2d65e7d2259c6dac9efc67a61f7c009dd96984..556d51759cb126f3b49b032b6efeb7e9924f864c 100644
--- a/documentation20/cn/05.insert/docs.md
+++ b/documentation20/cn/05.insert/docs.md
@@ -2,9 +2,9 @@
TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。
-## SQL写入
+## SQL 写入
-应用通过C/C++, JDBC, GO, 或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中:
+应用通过C/C++、JDBC、GO、C#或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
@@ -23,20 +23,83 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
**Tips:**
- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M)。
-- TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程切频繁切换,带来额外开销。
-- 对同一张表,如果新插入记录的时间戳已经存在,默认(没有使用 UPDATE 1 创建数据库)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。
-- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。
+- TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
+- 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。
+- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。
-## Prometheus直接写入
+## Schemaless 写入
-[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供 Schemaless 写入方式,可以免于预先创建超级表/数据子表,而是随着数据写入,自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless) 章节。这里对 Schemaless 的数据表达格式进行描述。
-### 从源代码编译blm_prometheus
+### Schemaless 数据行协议
+
+Schemaless 采用一个字符串来表达最终存储的一个数据行(可以向 Schemaless 写入 API 中一次传入多个字符串来实现多个数据行的批量写入),其格式约定如下:
+```
+measurement,tag_set field_set timestamp
+```
+
+其中,
+* measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。
+* tag_set 将作为标签数据,其格式形如 `<tag_key1>=<tag_value1>,<tag_key2>=<tag_value2>`,也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。
+* field_set 将作为普通列数据,其格式形如 `<field_key1>=<field_value1>,<field_key2>=<field_value2>`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。
+* timestamp 即本行数据对应的主键时间戳。
+
+在 Schemaless 的数据行协议中,tag_set、field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
+* 如果两边有英文双引号,表示 BINARY(32) 类型。例如 `"abc"`。
+* 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。
+* 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
+* 数值类型将通过后缀来区分数据类型:
+ - 没有后缀,为 FLOAT 类型;
+ - 后缀为 f32,为 FLOAT 类型;
+ - 后缀为 f64,为 DOUBLE 类型;
+ - 后缀为 i8,表示为 TINYINT (INT8) 类型;
+ - 后缀为 i16,表示为 SMALLINT (INT16) 类型;
+ - 后缀为 i32,表示为 INT (INT32) 类型;
+ - 后缀为 i64,表示为 BIGINT (INT64) 类型;
+* t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
+
+timestamp 位置的时间戳通过后缀来声明时间精度,具体如下:
+* 不带任何后缀的长整数会被当作微秒来处理;
+* 当后缀为 s 时,表示秒时间戳;
+* 当后缀为 ms 时,表示毫秒时间戳;
+* 当后缀为 us 时,表示微秒时间戳;
+* 当后缀为 ns 时,表示纳秒时间戳;
+* 当时间戳为 0 时,表示采用客户端的当前时间(因此,同一批提交的数据中,时间戳 0 会被解释为同一个时间点,于是就有可能导致时间戳重复)。
+
+例如,如下 Schemaless 数据行表示:向名为 st 的超级表下的 t1 标签为 3(BIGINT 类型)、t2 标签为 4(DOUBLE 类型)、t3 标签为 "t3"(BINARY 类型)的数据子表,写入 c1 列为 3(BIGINT 类型)、c2 列为 false(BOOL 类型)、c3 列为 "passit"(NCHAR 类型)、c4 列为 4(DOUBLE 类型)、主键时间戳为 1626006833639000000(纳秒精度)的一行数据。
+```
+st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns
+```
+
+需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。
+
+### Schemaless 的处理逻辑
+
+Schemaless 按照如下原则来处理行数据:
+1. 当 tag_set 中有 ID 字段时,该字段的值将作为数据子表的表名。
+2. 没有 ID 字段时,将使用 `measurement + tag_value1 + tag_value2 + ...` 的 md5 值来作为子表名。
+3. 如果指定的超级表名不存在,则 Schemaless 会创建这个超级表。
+4. 如果指定的数据子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
+5. 如果数据行中指定的标签列或普通列不存在,则 Schemaless 会在超级表中增加对应的标签列或普通列(只增不减)。
+6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。
+7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,那么 Schemaless 会增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
+8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。
+9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
+
+**注意:**Schemaless 所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。
+
+关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。
+
+## Prometheus 直接写入
+
+[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Foundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需对Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
+
+### 从源代码编译 blm_prometheus
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件:
- Linux操作系统的服务器
-- 安装好Golang, 1.10版本以上
-- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在bailongma所在的linux服务器(可以与TDengine在同一台服务器,或者不同服务器)
+- 安装好Golang,1.10版本以上
+- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0,则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器)上也需要安装TDengine 2.0.0版本的客户端。
Bailongma项目中有一个文件夹blm_prometheus,存放了prometheus的写入API程序。编译过程如下:
```bash
@@ -46,23 +109,26 @@ go build
一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。
-### 安装Prometheus
+### 安装 Prometheus
-通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/)
+通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)。
-### 配置Prometheus
+### 配置 Prometheus
-参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置
+参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的 remote_write 部分,增加以下配置:
-- url: bailongma API服务提供的URL, 参考下面的blm_prometheus启动示例章节
+```
+ - url: "bailongma API服务提供的URL"(参考下面的blm_prometheus启动示例章节)
+```
启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。
-### 启动blm_prometheus程序
+### 启动 blm_prometheus 程序
+
blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。
-```sh
+```bash
--tdengine-name
-如果TDengine安装在一台具备域名的服务器上,也可以通过配置TDengine的域名来访问TDengine。在K8S环境下,可以配置成TDengine所运行的service name
+如果TDengine安装在一台具备域名的服务器上,也可以通过配置TDengine的域名来访问TDengine。在K8S环境下,可以配置成TDengine所运行的service name。
--batch-size
blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
@@ -71,10 +137,10 @@ blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求
设置在TDengine中创建的数据库名称,blm_prometheus会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
--dbuser
-设置访问TDengine的用户名,缺省值是'root'
+设置访问TDengine的用户名,缺省值是'root'。
--dbpassword
-设置访问TDengine的密码,缺省值是'taosdata'
+设置访问TDengine的密码,缺省值是'taosdata'。
--port
blm_prometheus对prometheus提供服务的端口号。
@@ -92,7 +158,8 @@ remote_write:
- url: "http://10.1.2.3:8088/receive"
```
-### 查询prometheus写入数据
+### 查询 prometheus 写入数据
+
prometheus产生的数据格式如下:
```json
{
@@ -103,10 +170,10 @@ prometheus产生的数据格式如下:
instance="192.168.99.116:8443",
job="kubernetes-apiservers",
le="125000",
- resource="persistentvolumes", s
- cope="cluster",
+ resource="persistentvolumes",
+ scope="cluster",
verb="LIST",
- version=“v1"
+ version="v1"
}
}
```
@@ -116,17 +183,17 @@ use prometheus;
select * from apiserver_request_latencies_bucket;
```
-## Telegraf直接写入
+## Telegraf 直接写入
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一款流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
-### 从源代码编译blm_telegraf
+### 从源代码编译 blm_telegraf
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件:
- Linux操作系统的服务器
-- 安装好Golang, 1.10版本以上
-- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在bailongma所在的linux服务器(可以与TDengine在同一台服务器,或者不同服务器)
+- 安装好Golang,1.10版本以上
+- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0,则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器)上也需要安装TDengine 2.0.0版本的客户端。
Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下:
@@ -137,33 +204,34 @@ go build
一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。
-### 安装Telegraf
+### 安装 Telegraf
-目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads
+目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。
-### 配置Telegraf
+### 配置 Telegraf
修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。
在output plugins部分,增加[[outputs.http]]配置项:
-- url: bailongma API服务提供的URL, 参考下面的启动示例章节
-- data_format: "json"
-- json_timestamp_units: "1ms"
+- url:Bailongma API服务提供的URL,参考下面的启动示例章节
+- data_format:"json"
+- json_timestamp_units:"1ms"
在agent部分:
-- hostname: 区分不同采集设备的机器名称,需确保其唯一性
+- hostname: 区分不同采集设备的机器名称,需确保其唯一性。
- metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。
关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。
-### 启动blm_telegraf程序
+### 启动 blm_telegraf 程序
+
blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。
-```sh
+```bash
--host
-TDengine服务端的IP地址,缺省值为空
+TDengine服务端的IP地址,缺省值为空。
--batch-size
blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
@@ -172,10 +240,10 @@ blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,
设置在TDengine中创建的数据库名称,blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
--dbuser
-设置访问TDengine的用户名,缺省值是'root'
+设置访问TDengine的用户名,缺省值是'root'。
--dbpassword
-设置访问TDengine的密码,缺省值是'taosdata'
+设置访问TDengine的密码,缺省值是'taosdata'。
--port
blm_telegraf对telegraf提供服务的端口号。
@@ -183,18 +251,18 @@ blm_telegraf对telegraf提供服务的端口号。
### 启动示例
-通过以下命令启动一个blm_telegraf的API服务
+通过以下命令启动一个blm_telegraf的API服务:
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
-假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项:
+假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中,在output plugins部分,增加[[outputs.http]]配置项:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
-### 查询telegraf写入数据
+### 查询 telegraf 写入数据
telegraf产生的数据格式如下:
```json
@@ -221,18 +289,16 @@ telegraf产生的数据格式如下:
}
```
-其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,Timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
+其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
```mysql
use telegraf;
select * from cpu;
```
-MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。
-
## EMQ Broker 直接写入
-[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考[EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。
+MQTT是流行的物联网数据传输协议,[EMQ](https://github.com/emqx/emqx)是一款开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过“发送到 Web 服务”的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。
## HiveMQ Broker 直接写入
diff --git a/documentation20/cn/06.queries/docs.md b/documentation20/cn/06.queries/docs.md
index 5557134aac23b4f69066c9fb41aaa51972fcbba3..32b74d1b23416814b39addb68303587ecc0ba3f8 100644
--- a/documentation20/cn/06.queries/docs.md
+++ b/documentation20/cn/06.queries/docs.md
@@ -3,10 +3,10 @@
## 主要查询功能
-TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能:
+TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, C#, Python, Node.js 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行(Command Line Interface, CLI)工具 TAOS Shell 手动执行 SQL 即席查询(Ad-Hoc Query)。TDengine 支持如下查询功能:
- 单列、多列数据查询
-- 标签和数值的多种过滤条件:>, <, =, <>, like 等
+- 标签和数值的多种过滤条件:>, <, =, <>, like 等
- 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset)
- 数值列及聚合结果的四则运算
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
diff --git a/documentation20/cn/07.advanced-features/docs.md b/documentation20/cn/07.advanced-features/docs.md
index 1b4ccb4814f7adcc72c250c07bc2ec6151ea5f76..32e7a2aabdce54d65a352d8bf91395c3cfc9b32d 100644
--- a/documentation20/cn/07.advanced-features/docs.md
+++ b/documentation20/cn/07.advanced-features/docs.md
@@ -35,13 +35,13 @@ select avg(voltage) from meters interval(1m) sliding(30s);
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
-这样做没有问题,但TDengine提供了更简单的方法,只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如:
+这样做没有问题,但TDengine提供了更简单的方法,只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了,例如:
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
-会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如:
+会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。例如:
```mysql
taos> select * from avg_vol;
@@ -138,7 +138,7 @@ select * from meters where ts > now - 1d and current > 10;
订阅的`topic`实际上是它的名字,因为订阅功能是在客户端API中实现的,所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。
-如果名`topic`的订阅不存在,参数`restart`没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个`topic`时,`restart`就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果`restart`是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。
+如果名为`topic`的订阅不存在,参数`restart`没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个`topic`时,`restart`就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果`restart`是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。
`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用`taos_consume`的时间间隔小于此时间,`taos_consume`会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。
@@ -179,7 +179,8 @@ void print_result(TAOS_RES* res, int blockFetch) {
} else {
while ((row = taos_fetch_row(res))) {
char temp[256];
- taos_print_row(temp, row, fields, num_fields);puts(temp);
+ taos_print_row(temp, row, fields, num_fields);
+ puts(temp);
nRows++;
}
}
@@ -211,14 +212,14 @@ taos_unsubscribe(tsub, keep);
则可以在示例代码所在目录执行以下命令来编译并启动示例程序:
-```shell
+```bash
$ make
$ ./subscribe -sql='select * from meters where current > 10;'
```
示例程序启动后,打开另一个终端窗口,启动 TDengine 的 shell 向 **D1001** 插入一条电流为 12A 的数据:
-```shell
+```sql
$ taos
> use test;
> insert into D1001 values(now, 12, 220, 1);
@@ -313,7 +314,7 @@ public class SubscribeDemo {
运行示例程序,首先,它会消费符合查询条件的所有历史数据:
-```shell
+```bash
# java -jar subscribe.jar
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
@@ -333,16 +334,16 @@ taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
因为这条数据的电流大于10A,示例程序会将其消费:
-```shell
+```
ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
```
## 缓存(Cache)
-TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。
+TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recently-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。
-TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署额外的缓存系统,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。
+TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署额外的缓存系统,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据不会像专门的key-value缓存系统那样,把之前缓存的数据重新加载到缓存中。
TDengine分配固定大小的内存空间作为缓存空间,缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间,TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点(virtual node)创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池,不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 511bab8a605ce666d263d609d1599e30c85d78c4..b4537adad6f014712911d568a948b81f866b45f4 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -1,6 +1,6 @@
# Java Connector
-TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。
+## 总体介绍
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
@@ -12,72 +12,118 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实
* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。
* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。
-TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
+TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持嵌套查询(nested query)。
-* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
+### JDBC-JNI和JDBC-RESTful的对比
-## JDBC-JNI和JDBC-RESTful的对比
-
-
+
+| 对比项                           | JDBC-JNI                            | JDBC-RESTful |
+| -------------------------------- | ----------------------------------- | ------------ |
+| 支持的操作系统                   | linux、windows                      | 全平台       |
+| 是否需要安装 client              | 需要                                | 不需要       |
+| server 升级后是否需要升级 client | 需要                                | 不需要       |
+| 写入性能                         | JDBC-RESTful 是 JDBC-JNI 的 50%~90% |              |
+| 查询性能                         | JDBC-RESTful 与 JDBC-JNI 没有差别   |              |
-注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
+注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.2.0.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如:
+```sql
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+```
+
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+
+| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
+| -------------------- | ----------------- | -------- |
+| 2.0.33 - 2.0.34 | 2.0.3.0 及以上 | 1.8.x |
+| 2.0.31 - 2.0.32 | 2.1.3.0 及以上 | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
+| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
+| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
+| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
+
+## TDengine DataType 和 Java DataType
+
+TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
+
+| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) |
+| ----------------- | ------------------ | ------------------ |
+| TIMESTAMP | java.lang.Long | java.sql.Timestamp |
+| INT | java.lang.Integer | java.lang.Integer |
+| BIGINT | java.lang.Long | java.lang.Long |
+| FLOAT | java.lang.Float | java.lang.Float |
+| DOUBLE | java.lang.Double | java.lang.Double |
+| SMALLINT | java.lang.Short | java.lang.Short |
+| TINYINT | java.lang.Byte | java.lang.Byte |
+| BOOL | java.lang.Boolean | java.lang.Boolean |
+| BINARY | java.lang.String | byte array |
+| NCHAR | java.lang.String | java.lang.String |
+
+## 安装Java Connector
+
+### 安装前准备
-## 如何获取 taos-jdbcdriver
+使用Java Connector连接数据库前,需要具备以下条件:
+1. Linux或Windows操作系统
+2. Java 1.8以上运行时环境
+3. TDengine-client(使用JDBC-JNI时必须,使用JDBC-RESTful时非必须)
-### maven 仓库
+**注意**:由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
+- libtaos.so:在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
+- taos.dll:在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需单独指定。
-目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
+**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
-* [sonatype][8]
-* [mvnrepository][9]
-* [maven.aliyun][10]
+### 通过maven获取JDBC driver
-maven 项目中使用如下 pom.xml 配置即可:
+目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
+- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
+- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
+- [maven.aliyun](https://maven.aliyun.com/mvn/search)
-```xml
+maven 项目中,在pom.xml 中添加以下依赖:
+```xml
+<dependency>
+    <groupId>com.taosdata.jdbc</groupId>
+    <artifactId>taos-jdbcdriver</artifactId>
+    <version>2.0.18</version>
+</dependency>
+```
-### 源码编译打包
-
-下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
+### 通过源码编译获取JDBC driver
+可以通过下载TDengine的源码,自己编译最新版本的Java connector:
+```shell
+git clone https://github.com/taosdata/TDengine.git
+cd TDengine/src/connector/jdbc
+mvn clean package -Dmaven.test.skip=true
+```
+编译后,在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。
-## JDBC的使用说明
+## Java连接器的使用
### 获取连接
@@ -94,13 +140,11 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
-
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示:
-
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
@@ -109,15 +153,9 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
-**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
-
-* libtaos.so
- 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
+**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。
-* taos.dll
- 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-
-> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
+> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF) 连接远程 TDengine Server。
JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
@@ -125,14 +163,15 @@ TDengine 的 JDBC URL 规范格式为:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
url中的配置参数如下:
-* user:登录 TDengine 用户名,默认值 root。
-* password:用户登录密码,默认值 taosdata。
-* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
+* user:登录 TDengine 用户名,默认值 'root'。
+* password:用户登录密码,默认值 'taosdata'。
+* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
* charset:客户端使用的字符集,默认值为系统字符集。
* locale:客户端语言环境,默认值系统当前 locale。
* timezone:客户端使用的时区,默认值为系统当前时区。
-
-
+* batchfetch: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
+* timestampFormat: 仅在使用JDBC-RESTful时生效。'TIMESTAMP':结果集中timestamp类型的字段为一个long值;'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串;'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
+* batchErrorIgnore:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行后面的sql;false:不再执行失败sql后的任何语句。默认值为:false。
#### 指定URL和Properties获取连接
@@ -155,19 +194,19 @@ public Connection getConn() throws Exception{
以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
properties 中的配置参数如下:
-* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。
-* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。
-* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
+* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。
+* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。
+* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
* TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。
-
-
+* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
+* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效。'TIMESTAMP':结果集中timestamp类型的字段为一个long值;'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串;'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
+* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行后面的sql;false:不再执行失败sql后的任何语句。默认值为:false。
#### 使用客户端配置文件建立连接
-当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。
-如下所示:
+当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示:
1. 在 Java 应用中不指定 hostname 和 port
@@ -201,6 +240,7 @@ secondEp cluster_node2:6030
```
以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。
+
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C:/TDengine/cfg/taos.cfg。
@@ -214,7 +254,7 @@ TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可
例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。
-> 更多详细配置请参考[客户端配置][13]
+> 更多详细配置请参考[客户端配置](https://www.taosdata.com/cn/documentation/administrator/#client)
### 创建数据库和表
@@ -242,8 +282,8 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
-> now 为系统内部函数,默认为服务器当前时间。
-> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
+> now 为系统内部函数,默认为客户端所在计算机当前时间。
+> `now + 1s` 代表客户端当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒),s(秒),m(分),h(小时),d(天),w(周),n(月),y(年)。
### 查询数据
@@ -284,6 +324,7 @@ try (Statement statement = connection.createStatement()) {
```
JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。
+
具体的错误码请参考:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
@@ -364,11 +405,12 @@ public void setShort(int columnIndex, ArrayList list) throws SQLException
public void setString(int columnIndex, ArrayList list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList list, int size) throws SQLException
```
+
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
-### 订阅
+## 订阅
-#### 创建
+### 创建
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
@@ -382,7 +424,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
-#### 消费数据
+### 消费数据
```java
int total = 0;
@@ -400,7 +442,7 @@ while(true) {
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
-#### 关闭订阅
+### 关闭订阅
```java
sub.close(true);
@@ -408,7 +450,7 @@ sub.close(true);
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
-### 关闭资源
+## 关闭资源
```java
resultSet.close();
@@ -418,23 +460,11 @@ conn.close();
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
-
-
## 与连接池使用
-**HikariCP**
+### HikariCP
-* 引入相应 HikariCP maven 依赖:
-
-```xml
-
- com.zaxxer
- HikariCP
- 3.4.1
-
-```
-
-* 使用示例如下:
+使用示例如下:
```java
public static void main(String[] args) throws SQLException {
@@ -464,21 +494,11 @@ conn.close();
```
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
-> 更多 HikariCP 使用问题请查看[官方说明][5]
+> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。
-**Druid**
+### Druid
-* 引入相应 Druid maven 依赖:
-
-```xml
-
- com.alibaba
- druid
- 1.1.20
-
-```
-
-* 使用示例如下:
+使用示例如下:
```java
public static void main(String[] args) throws Exception {
@@ -505,13 +525,13 @@ public static void main(String[] args) throws Exception {
}
```
-> 更多 druid 使用问题请查看[官方说明][6]
+> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。
-**注意事项**
+**注意事项:**
* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
如下所示,`select server_status()` 执行成功会返回 `1`。
-```shell
+```sql
taos> select server_status();
server_status()|
================
@@ -519,47 +539,20 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
+## 在框架中使用
+* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
+* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
-## 与框架使用
-
-* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
-* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
-
-
-
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
-
-| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
-| -------------------- | ----------------- | -------- |
-| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
-| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
-| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
-| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
-| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
-
-
-
-## TDengine DataType 和 Java DataType
-
-TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
-
-| TDengine DataType | Java DataType |
-| ----------------- | ------------------ |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT | java.lang.Short |
-| TINYINT | java.lang.Byte |
-| BOOL | java.lang.Boolean |
-| BINARY | byte array |
-| NCHAR | java.lang.String |
+## 示例程序
+示例程序源码位于TDengine/tests/examples/JDBC下:
+* JDBCDemo:JDBC示例源程序
+* JDBCConnectorChecker:JDBC安装校验源程序及jar包
+* Springbootdemo:springboot示例源程序
+* SpringJdbcTemplate:SpringJDBC模板
+请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## 常见问题
@@ -567,7 +560,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
**原因**:程序没有找到依赖的本地函数库 taos。
- **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
+ **解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@@ -575,21 +568,5 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
**解决方法**:重新安装 64 位 JDK。
-* 其它问题请参考 [Issues][7]
-
-[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[3]: https://github.com/taosdata/TDengine
-[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
-[5]: https://github.com/brettwooldridge/HikariCP
-[6]: https://github.com/alibaba/druid
-[7]: https://github.com/taosdata/TDengine/issues
-[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
-[10]: https://maven.aliyun.com/mvn/search
-[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
-[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
-[13]: https://www.taosdata.com/cn/documentation/administrator/#client
-[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
-[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF
+* 其它问题请参考 [Issues](https://github.com/taosdata/TDengine/issues)
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 90bcc511935eafeb962171c15483b0dedb7b2683..3167404f8067610f0bf5f74fe41320decdcbcdf0 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -17,13 +17,13 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
-其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。
+其中 ● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
注意:
* 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。
* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `taos_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。
-* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
+* 升级 TDengine 到 2.0.8.0 版本的用户,必须同步更新 JDBC 连接器:连接 TDengine 时必须将 taos-jdbcdriver 升级到 2.0.12 及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。
* 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。
## 安装连接器驱动步骤
@@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Linux**
-**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载:**
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
@@ -46,7 +46,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
`tar -xzvf TDengine-client-xxxxxxxxx.tar.gz`
-其中xxxxxxx需要替换为实际版本的字符串。
+其中xxxxxxxxx需要替换为实际版本的字符串。
**3. 执行安装脚本**
@@ -58,17 +58,20 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
*connector*: 各种编程语言连接器(go/grafanaplugin/nodejs/python/JDBC)
*examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R)
-运行install_client.sh进行安装
+运行install_client.sh进行安装。
**4. 配置taos.cfg**
编辑taos.cfg文件(默认路径/etc/taos/taos.cfg),将firstEP修改为TDengine服务器的End Point,例如:h1.taos.com:6030
-**提示: 如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。**
+**提示:**
+
+1. **如本机没有部署TDengine服务,仅安装了应用驱动,则taos.cfg中仅需配置firstEP,无需配置FQDN。**
+2. **为防止与服务器端连接时出现“unable to resolve FQDN”错误,建议确认客户端的hosts文件已经配置正确的FQDN值。**
**Windows x64/x86**
-**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载 :**
+**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载:**
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
@@ -95,17 +98,16 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**提示:**
-**1. 如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:** **192.168.1.99 h1.taos.com**
-
-**2.卸载:运行unins000.exe可卸载TDengine应用驱动。**
+1. **如利用FQDN连接服务器,必须确认本机网络环境DNS已配置好,或在hosts文件中添加FQDN寻址记录,如编辑C:\Windows\system32\drivers\etc\hosts,添加如下的记录:`192.168.1.99 h1.taos.com`**
+2. **卸载:运行unins000.exe可卸载TDengine应用驱动。**
-**安装验证**
+### 安装验证
以上安装和配置完成后,并确认TDengine服务已经正常启动运行,此时可以执行taos客户端进行登录。
**Linux环境:**
-在linux shell下直接执行 taos,应该就能正常链接到tdegine服务,进入到taos shell界面,示例如下:
+在Linux shell下直接执行 taos,应该就能正常连接到TDengine服务,进入到taos shell界面,示例如下:
```mysql
$ taos
@@ -146,7 +148,10 @@ taos>
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** |
-C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 _taos.h_(安装后,位于 _/usr/local/taos/include_):
+C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 *taos.h*,里面列出了提供的API的函数原型。安装后,taos.h位于:
+
+- Linux:`/usr/local/taos/include`
+- Windows:`C:\TDengine\include`
```C
#include <taos.h>
@@ -156,9 +161,22 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
* 在编译时需要链接TDengine动态库。Linux 为 *libtaos.so* ,安装后,位于 _/usr/local/taos/driver_。Windows为 taos.dll,安装后位于 *C:\TDengine*。
* 如未特别说明,当API的返回值是整数时,_0_ 代表成功,其它是代表失败原因的错误码,当返回值是指针时, _NULL_ 表示失败。
+* 在 taoserror.h中有所有的错误码,以及对应的原因描述。
+
+### 示例程序
使用C/C++连接器的示例代码请参见 https://github.com/taosdata/TDengine/tree/develop/tests/examples/c 。
+示例程序源码也可以在安装目录下的 examples/c 路径下找到:
+
+**apitest.c、asyncdemo.c、demo.c、prepare.c、stream.c、subscribe.c**
+
+该目录下有makefile,在Linux环境下,直接执行make就可以编译得到执行文件。
+
+在一台机器上启动TDengine服务,执行这些示例程序,按照提示输入TDengine服务的FQDN,就可以正常运行,并打印出信息。
+
+**提示:**在ARM环境下编译时,请将makefile中的-msse4.2去掉,这个选项只有在x64/x86硬件平台上才能支持。
+
### 基础API
基础API用于完成创建数据库连接等工作,为其它API的执行提供运行时环境。
@@ -173,7 +191,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
- 设置客户端选项,目前只支持时区设置(_TSDB_OPTION_TIMEZONE_)和编码设置(_TSDB_OPTION_LOCALE_)。时区和编码默认为操作系统当前设置。
+ 设置客户端选项,目前支持区域设置(`TSDB_OPTION_LOCALE`)、字符集设置(`TSDB_OPTION_CHARSET`)、时区设置(`TSDB_OPTION_TIMEZONE`)、配置文件路径设置(`TSDB_OPTION_CONFIGDIR`)。区域设置、字符集、时区默认为操作系统当前设置。
- `char *taos_get_client_info()`
@@ -187,7 +205,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- user:用户名
- pass:密码
- db:数据库名字,如果用户没有提供,也可以正常连接,用户可以通过该连接创建新的数据库,如果用户提供了数据库名字,则说明该数据库用户已经创建好,缺省使用该数据库
- - port:端口号
+ - port:TDengine管理主节点的端口号
返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。
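+
+下面给出一个建立连接的最小示意片段(非官方示例;其中的主机名 localhost、端口 6030、用户名/密码均为假设的缺省值,请按实际环境替换):
+
+```c
+#include <stdio.h>
+#include <taos.h>
+
+int main() {
+  // 可选:在建立连接前设置客户端选项,例如区域设置
+  taos_options(TSDB_OPTION_LOCALE, "en_US.UTF-8");
+
+  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
+  if (conn == NULL) {
+    printf("failed to connect to TDengine\n");
+    return -1;
+  }
+  printf("client version: %s\n", taos_get_client_info());
+
+  taos_close(conn);  // 使用完毕后关闭连接
+  return 0;
+}
+```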
@@ -201,7 +219,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `void taos_close(TAOS *taos)`
- 关闭连接, 其中`taos`是`taos_connect`函数返回的指针。
+ 关闭连接,其中`taos`是`taos_connect`函数返回的指针。
### 同步查询API
@@ -237,13 +255,13 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`
- 获取查询结果集每列数据的属性(数据类型、名字、字节数),与taos_num_fileds配合使用,可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下:
+ 获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度),与taos_num_fields配合使用,可用来解析`taos_fetch_row`返回的一个元组(一行)的数据。 `TAOS_FIELD` 的结构如下:
```c
typedef struct taosField {
char name[65]; // 列名
uint8_t type; // 数据类型
- int16_t bytes; // 字节数
+ int16_t bytes; // 长度,单位是字节
} TAOS_FIELD;
```
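+
+结合上面几个 API,下面是一个执行查询并逐行打印结果集的示意片段(非官方示例;假设 conn 为 taos_connect 返回的有效连接,表 test.meters 仅为演示而假设存在):
+
+```c
+TAOS_RES *res = taos_query(conn, "SELECT * FROM test.meters LIMIT 10");
+if (taos_errno(res) != 0) {
+  // 查询失败时,可通过 taos_errstr 获取错误原因
+  printf("query failed: %s\n", taos_errstr(res));
+} else {
+  int         num_fields = taos_num_fields(res);
+  TAOS_FIELD *fields     = taos_fetch_fields(res);  // 每列的名称/类型/长度
+  TAOS_ROW    row;
+  char        buf[1024];
+  while ((row = taos_fetch_row(res)) != NULL) {
+    taos_print_row(buf, row, fields, num_fields);   // 将一行数据格式化为字符串
+    printf("%s\n", buf);
+  }
+}
+taos_free_result(res);  // 无论成功与否,最终都要释放结果集
+```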
@@ -271,7 +289,7 @@ typedef struct taosField {
异步API都需要应用提供相应的回调函数,回调函数参数设置如下:前两个参数都是一致的,第三个参数依不同的API而定。第一个参数param是应用调用异步API时提供给系统的,用于回调时,应用能够找回具体操作的上下文,依具体实现而定。第二个参数是SQL操作的结果集,如果为空,比如insert操作,表示没有记录返回,如果不为空,比如select操作,表示有记录返回。
-异步API对于使用者的要求相对较高,用户可根据具体应用场景选择性使用。下面是三个重要的异步API:
+异步API对于使用者的要求相对较高,用户可根据具体应用场景选择性使用。下面是两个重要的异步API:
- `void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);`
@@ -294,7 +312,7 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
### 参数绑定 API
-除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。
+除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。文档中有时也会把此功能称为“原生接口写入”。
从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下:
1. 调用 `taos_stmt_init` 创建参数绑定对象;
@@ -385,6 +403,25 @@ typedef struct TAOS_MULTI_BIND {
(2.1.3.0 版本新增)
用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。
+
+### Schemaless 方式写入接口
+
+除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](https://www.taosdata.com/cn/documentation/insert#schemaless) 章节,这里介绍与之配套使用的 C/C++ API。
+
+- `int taos_insert_lines(TAOS* taos, char* lines[], int numLines)`
+
+ (2.2.0.0 版本新增)
+ 以 Schemaless 格式写入多行数据。其中:
+ * taos:调用 taos_connect 返回的数据库连接。
+ * lines:由 char 字符串指针组成的数组,指向本次想要写入数据库的多行数据。
+ * numLines:lines 数据的总行数。
+
+ 返回值为 0 表示写入成功,非零值表示出错。具体错误代码请参见 [taoserror.h](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) 文件。
+
+ 说明:
+ 1. 此接口是一个同步阻塞式接口,使用时机与 `taos_query()` 一致。
+ 2. 在调用此接口之前,必须先调用 `taos_select_db()` 来确定目前是在向哪个 DB 来写入。
+
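+下面是一个调用该接口的示意片段(非官方示例;假设 conn 为有效的数据库连接,数据库 test 已存在,数据行格式即上文“Schemaless 数据行协议”中的约定):
+
+```c
+// 写入前必须先用 taos_select_db 选定目标数据库
+taos_select_db(conn, "test");
+
+char *lines[] = {
+  "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns"
+};
+int code = taos_insert_lines(conn, lines, 1);
+if (code != 0) {
+  // 具体错误码含义请查阅 taoserror.h
+  printf("schemaless insert failed, code: %d\n", code);
+}
+```
+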
### 连续查询接口
TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下:
@@ -392,11 +429,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
- `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))`
该API用来创建数据流,其中:
- * taos:已经建立好的数据库连接
- * sql:SQL查询语句(仅能使用查询语句)
+ * taos:已经建立好的数据库连接。
+ * sql:SQL查询语句(仅能使用查询语句)。
* fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。
* stime:是流式计算开始的时间。如果是“64位整数最小值”,表示从现在开始;如果不为“64位整数最小值”,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)。
- * param:是应用提供的用于回调的一个参数,回调时,提供给应用
+ * param:是应用提供的用于回调的一个参数,回调时,提供给应用。
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
返回值为NULL,表示创建失败;返回值不为空,表示成功。
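+
+下面是一个创建数据流的示意片段(非官方示例;假设 conn 为有效连接,表 test.meters 仅为演示而假设存在,INT64_MIN 来自 stdint.h):
+
+```c
+// 每个计算周期结束后被回调一次,row 为该周期的聚合结果
+void stream_callback(void *param, TAOS_RES *res, TAOS_ROW row) {
+  int         num_fields = taos_num_fields(res);
+  TAOS_FIELD *fields     = taos_fetch_fields(res);
+  char        buf[512];
+  taos_print_row(buf, row, fields, num_fields);
+  printf("stream result: %s\n", buf);
+}
+
+// stime 传入“64位整数最小值”(INT64_MIN),表示从现在开始计算
+TAOS_STREAM *stream = taos_open_stream(conn,
+    "SELECT COUNT(*) FROM test.meters INTERVAL(1m)",
+    stream_callback, INT64_MIN, NULL, NULL);
+if (stream == NULL) {
+  printf("failed to open stream\n");
+}
+// 不再需要时,调用 taos_close_stream(stream) 关闭该流
+```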
@@ -440,18 +477,29 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
+
+
## Python Connector
Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html)
-### 安装准备
-* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
-* 已安装python 2.7 or >= 3.4
-* 已安装pip 或 pip3
+**安装**:参见下面具体步骤
+
+**示例程序**:位于install_directory/examples/python
+
+### 安装
-### Python客户端安装
+Python连接器支持的系统有:Linux 64/Windows x64
-#### Linux
+安装前准备:
+
+- 已安装好TDengine应用驱动,请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)
- 已安装Python 2.7 或 3.4 及以上版本
+- 已安装pip
+
+### Python连接器安装
+
+**Linux**
用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到connector安装包。用户可以通过pip命令安装:
@@ -461,9 +509,10 @@ Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/
`pip3 install src/connector/python/`
-#### Windows
-在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windwos cmd 命令行界面
-```cmd
+**Windows**
+
+在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\Windows\system32" 目录下, 然后进入Windows *cmd* 命令行界面
+```bash
cd C:\TDengine\connector\python
python -m pip install .
```
@@ -471,7 +520,37 @@ python -m pip install .
* 如果机器上没有pip命令,用户可将src/connector/python下的taos文件夹拷贝到应用程序的目录使用。
对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
-### 使用
+### 示例程序
+
+示例程序源码位于install_directory/examples/Python,有:
+- read_example.py:Python示例源程序
+
+用户可以参考read_example.py这个程序来设计用户自己的写入、查询程序。
+
+在安装了对应的应用驱动后,通过import taos引入taos类。主要步骤如下:
+
+- 通过taos.connect获取TDengineConnection对象,这个对象可以一个程序只申请一个,在多线程中共享。
+
+- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。
+
+- 通过游标对象的execute()方法,执行写入或查询的SQL语句。
+
+- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows。
+
+- 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。
+
+### 安装验证
+
+运行如下指令:
+
+```bash
+cd {install_directory}/examples/python/PYTHONConnectorChecker/
+python3 PythonChecker.py -host <fqdn>
+```
+
+验证通过将打印出成功信息。
+
+### Python连接器的使用
#### 代码示例
@@ -486,7 +565,7 @@ import taos
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
-* host 是TDengine 服务端所有IP, config 为客户端配置文件所在目录
+* *host* 是TDengine 服务端所在的IP,*config* 为客户端配置文件所在目录
* 写入数据
@@ -588,8 +667,8 @@ conn.close()
- 通过taos.connect获取TDengineConnection对象,这个对象可以一个程序只申请一个,在多线程中共享。
- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。
-- 通过游标对象的execute()方法,执行写入或查询的SQL语句
-- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows
+- 通过游标对象的execute()方法,执行写入或查询的SQL语句。
+- 如果执行的是写入语句,execute返回的是成功写入的行数信息affected rows。
- 如果执行的是查询语句,则execute执行成功后,需要通过fetchall方法去拉取结果集。
具体方法可以参考示例代码。
@@ -597,22 +676,52 @@ conn.close()
为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
-注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。
+注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。)
+
+### 安装
+
+RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。
+
+### 验证
-### HTTP请求格式
+在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。
+下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)为例,验证 RESTful 接口是否正常工作。
+
+下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 fqdn 和端口号:
+```bash
+curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
```
-http://:/rest/sql
+
+返回值结果如下表示验证通过:
+```json
+{
+ "status": "succ",
+ "head": ["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],
+ "data": [
+ ["log","2020-09-02 17:23:00.039",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,"us","ready"],
+ ],
+ "rows": 1
+}
+```
+
+### RESTful 连接器的使用
+
+#### HTTP 请求格式
+
+```
+http://<fqdn>:<port>/rest/sql/[db_name]
```
参数说明:
-- IP: 集群中的任一台主机
-- PORT: 配置文件中httpPort配置项,缺省为6041
+- fqdn: 集群中的任一台主机 FQDN 或 IP 地址
+- port: 配置文件中 httpPort 配置项,缺省为 6041
+- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持)
-例如:http://192.168.0.1:6041/rest/sql 是指向IP地址为192.168.0.1的URL.
+例如:http://h1.taos.com:6041/rest/sql/test 是指向地址为 h1.taos.com:6041 的 url,并将默认使用的数据库库名设置为 test。
-HTTP请求的Header里需带有身份认证信息,TDengine支持Basic认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
+HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
- 自定义身份认证信息如下所示(稍后介绍)
@@ -626,25 +735,25 @@ Authorization: Taosd
Authorization: Basic <TOKEN>
```
-HTTP请求的BODY里就是一个完整的SQL语句,SQL语句中的数据表应提供数据库前缀,例如\.\。如果表名不带数据库前缀,系统会返回错误。因为HTTP模块只是一个简单的转发,没有当前DB的概念。
+HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据表应提供数据库前缀,例如 \<db_name>.\<tb_name>。如果表名不带数据库前缀,又没有在 url 中指定数据库名的话,系统会返回错误。因为 HTTP 模块只是一个简单的转发,没有当前 DB 的概念。
-使用curl通过自定义身份认证方式来发起一个HTTP Request,语法如下:
+使用 curl 通过自定义身份认证方式来发起一个 HTTP Request,语法如下:
```bash
-curl -H 'Authorization: Basic ' -d '' :/rest/sql
+curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
```
或者
```bash
-curl -u username:password -d '' :/rest/sql
+curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
```
-其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串,例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==`
+其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
-### HTTP返回格式
+### HTTP 返回格式
-返回值为JSON格式,如下:
+返回值为 JSON 格式,如下:
```json
{
@@ -662,12 +771,12 @@ curl -u username:password -d '' :/rest/sql
说明:
- status: 告知操作结果是成功还是失败。
-- head: 表的定义,如果不返回结果集,则仅有一列“affected_rows”。(从 2.0.17 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中,有可能会从返回值中去掉 head 这一项。)
-- column_meta: 从 2.0.17 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
-- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有[[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
+- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中,有可能会从返回值中去掉 head 这一项。)
+- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
+- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
- rows: 表明总共多少行数据。
-column_meta 中的列类型说明:
+column_meta 中的列类型说明:
* 1:BOOL
* 2:TINYINT
* 3:SMALLINT
@@ -681,19 +790,19 @@ column_meta 中的列类型说明:
### 自定义授权码
-HTTP请求中需要带有授权码``,用于身份识别。授权码通常由管理员提供,可简单的通过发送`HTTP GET`请求来获取授权码,操作如下:
+HTTP 请求中需要带有授权码 `<TOKEN>`,用于身份识别。授权码通常由管理员提供,可简单的通过发送 `HTTP GET` 请求来获取授权码,操作如下:
```bash
-curl http://:6041/rest/login//
+curl http://<fqdn>:<port>/rest/login/<username>/<password>
```
-其中,`ip`是TDengine数据库的IP地址,`username`为数据库用户名,`password`为数据库密码,返回值为`JSON`格式,各字段含义如下:
+其中,`fqdn` 是 TDengine 数据库的 fqdn 或 ip 地址,port 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 `JSON` 格式,各字段含义如下:
- status:请求结果的标志位
- code:返回值代码
-- desc: 授权码
+- desc:授权码
获取授权码示例:
@@ -713,7 +822,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
### 使用示例
-- 在demo库里查询表d1001的所有记录:
+- 在 demo 库里查询表 d1001 的所有记录:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
@@ -733,7 +842,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
}
```
-- 创建库demo:
+- 创建库 demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
@@ -752,9 +861,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19
### 其他用法
-#### 结果集采用Unix时间戳
+#### 结果集采用 Unix 时间戳
-HTTP请求URL采用`sqlt`时,返回结果集的时间戳将采用Unix时间戳格式表示,例如
+HTTP 请求 URL 采用 `sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
@@ -775,9 +884,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
}
```
-#### 结果集采用UTC时间字符串
+#### 结果集采用 UTC 时间字符串
-HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间字符串表示,例如
+HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
```
@@ -799,13 +908,14 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
### 重要配置项
-下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效
+下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。(注意:配置修改后,需要重启 taosd 服务才能生效)
-- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)
-- httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整)
-- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
-- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
-- httpDebugFlag: 日志开关,131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131
+- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。
+- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。
+- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。
+- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。
+- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息。
+- httpDbNameMandatory: 是否必须在 RESTful url 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful url 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。
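+
+下面是 taos.cfg 中与 RESTful 相关参数的一个配置示意(各取值仅为示例而非推荐配置,修改后需重启 taosd 才能生效):
+
+```
+# RESTful 服务端口为 serverPort + 11(即缺省 6041)
+serverPort          6030
+httpMaxThreads      4
+restfulRowLimit     10240
+httpEnableCompress  0
+httpDebugFlag       131
+httpDbNameMandatory 0
+```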
## CSharp Connector
@@ -814,9 +924,15 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86
### 安装准备
* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
-* .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
+* 接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。
* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。
+### 示例程序
+
+示例程序源码位于install_directory/examples/C#,有:
+
+- TDengineTest.cs:C#示例源程序
+
### 安装验证
运行install_directory/examples/C#/C#Checker/C#Checker.exe
@@ -829,17 +945,17 @@ C#Checker.exe -h
### C#连接器的使用
-在Windows系统上,.NET应用程序可以使用TDengine的.NET接口来执行所有数据库的操作。使用.NET接口的步骤如下所示:
+在Windows系统上,C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示:
-1. 将.NET接口文件TDengineDrivercs.cs加入到应用程序所在.NET项目中。
-2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作;
+1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。
+2. 用户可以参考TDengineTest.cs来定义数据库连接参数,以及如何执行数据插入、查询等操作。
-此.NET接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到.NET项目最后生成.exe可执行文件所在文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。
+此接口需要用到taos.dll文件,所以在执行应用程序前,拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件,即可访问TDengine数据库并做插入、查询等操作。
**注意:**
-1. TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以.NET项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。
-2. 此.NET接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。
+1. TDengine V2.0.3.0之后同时支持32位和64位Windows系统,所以C#项目在生成.exe文件时,“解决方案”/“项目”的“平台”请选择对应的“X86” 或“x64”。
+2. 此接口目前已经在Visual Studio 2015/2017中验证过,其它VS版本尚待验证。
### 第三方驱动
@@ -856,32 +972,59 @@ https://www.taosdata.com/blog/2020/11/02/1901.html
### 安装准备
-* 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
+Go连接器支持的系统有:
-TDengine提供了GO驱动程序`taosSql`。 `taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine, 详见`https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go`。
+| **CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
+| --------------- | ------------ | -------- | -------- | -------- | ---------- |
+| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
+| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** |
+
+安装前准备:
+
+- 已安装好TDengine应用驱动,参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。
+
+### 示例程序
使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。
-```Go
+示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中。
+
+**提示:建议Go版本是1.13及以上,并开启模块支持:**
+```sh
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.io,direct
+```
+在taosdemo.go所在目录下进行编译和执行:
+```sh
+go mod init taosdemo
+go get github.com/taosdata/driver-go/taosSql
+# use win branch in Windows platform.
+#go get github.com/taosdata/driver-go/taosSql@win
+go build
+./taosdemo -h fqdn -p serverPort
+```
+
+### Go连接器的使用
+
+TDengine提供了GO驱动程序包`taosSql`。`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。
+```go
import (
- "database/sql"
- _ "github.com/taosdata/driver-go/taosSql"
+ "database/sql"
+ _ "github.com/taosdata/driver-go/v2/taosSql"
)
```
-**建议使用Go版本1.13或以上,并开启模块支持:**
-```bash
-go env -w GO111MODULE=on
-go env -w GOPROXY=https://goproxy.io,direct
-```
+**提示**:下划线与双引号之间必须有一个空格。
+
+`taosSql` 的 v2 版本进行了重构,分离出内置数据库操作接口 `database/sql/driver` 到目录 `taosSql`;订阅、stmt等其他功能放到目录 `af`。
### 常用API
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
- 该API用来打开DB,返回一个类型为\*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine
+ 该API用来打开DB,返回一个类型为\*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`,dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine,那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine。
- **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。
+ **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos动态库中,所以 sql.Open 本身特别轻量。
- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)`
@@ -920,7 +1063,7 @@ Node.js连接器支持的系统有:
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
-Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)
+Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)。
### 安装准备
@@ -930,14 +1073,14 @@ Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020
用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下:
-首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器.
+首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器。
```bash
npm install td2.0-connector
```
-我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下
+我们建议用户使用npm 安装node.js连接器。如果您没有安装npm,可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下。
-我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件:
+我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js连接器之前,还需要根据具体操作系统来安装下文提到的一些依赖工具。
### Linux
@@ -950,19 +1093,19 @@ npm install td2.0-connector
#### 安装方法1
-使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
+使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具。
#### 安装方法2
-手动安装以下工具:
+手动安装以下工具:
- 安装Visual Studio相关:[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
-- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
-- 进入`cmd`命令行界面, `npm config set msvs_version 2017`
+- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
+- 进入`cmd`命令行界面,`npm config set msvs_version 2017`
-如果以上步骤不能成功执行, 可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
+如果以上步骤不能成功执行,可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)。
-如果在Windows 10 ARM 上使用ARM64 Node.js, 还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64".
+如果在Windows 10 ARM 上使用ARM64 Node.js,还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。
### 示例程序
@@ -979,7 +1122,7 @@ Node-example-raw.js
1. 新建安装验证目录,例如:`~/tdengine-test`,拷贝github上nodejsChecker.js源程序。下载地址:(https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js)。
-2. 在命令中执行以下命令:
+2. 在命令行中执行以下命令:
```bash
npm init -y
@@ -991,23 +1134,19 @@ node nodejsChecker.js host=localhost
### Node.js连接器的使用
-以下是node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)
+以下是Node.js 连接器的一些基本使用方法,详细的使用方法可参考[TDengine Node.js connector](http://docs.taosdata.com/node)。
#### 建立连接
-使用node.js连接器时,必须先require `td2.0-connector`,然后使用 `taos.connect` 函数。`taos.connect` 函数必须提供的参数是`host`,其它参数在没有提供的情况下会使用如下的默认值。最后需要初始化`cursor` 来和TDengine服务端通信
+使用node.js连接器时,必须先 require `td2.0-connector`,然后使用 `taos.connect` 函数建立到服务端的连接。例如以下代码:
```javascript
const taos = require('td2.0-connector');
-var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+var conn = taos.connect({host:"taosdemo.com", user:"root", password:"taosdata", config:"/etc/taos",port:6030})
var cursor = conn.cursor(); // Initializing a new cursor
```
-关闭连接可执行
-
-```javascript
-conn.close();
-```
+建立了一个到hostname为taosdemo.com,端口为6030(TDengine的默认端口号)的连接。连接指定了用户名(root)和密码(taosdata)。taos.connect 函数必须提供的参数是`host`,其它参数在没有提供的情况下会使用默认值。taos.connect 返回了`cursor` 对象,使用 cursor 来执行 SQL 语句。
#### 执行SQL和插入数据
@@ -1027,7 +1166,7 @@ var affectRows = cursor.execute('insert into test.weather values(now, 22.3, 34);
execute方法的返回值为该语句影响的行数,上面的sql向test库的weather表中,插入了一条数据,则返回值affectRows为1。
-TDengine目前还不支持update和delete语句。
+TDengine 目前还不支持 delete 语句。但从 2.0.8.0 版本开始,可以通过 `CREATE DATABASE` 时指定的 UPDATE 参数来启用对数据行的 update。
#### 查询
@@ -1037,7 +1176,7 @@ TDengine目前还不支持update和delete语句。
var query = cursor.query('show databases;')
```
-查询的结果可以通过 `query.execute()` 函数获取并打印出来
+查询的结果可以通过 `query.execute()` 函数获取并打印出来。
```javascript
var promise = query.execute();
@@ -1061,6 +1200,14 @@ promise.then(function(result) {
result.pretty();
})
```
+
+#### 关闭连接
+
+在完成插入、查询等操作后,要关闭连接。代码如下:
+```js
+conn.close();
+```
+
#### 异步函数
异步查询数据库的操作和上面类似,只需要在`cursor.execute`, `TaosQuery.execute`等函数后面加上`_a`。
@@ -1077,6 +1224,6 @@ promise2.then(function(result) {
### 示例
-[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
+[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例。
-[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
+[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`。
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 7a13270830e94e67bf6d37530ca659d4ef9ff561..d5a2f2763550e54a0c1829ff87c60b7bbca3defe 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -19,11 +19,18 @@ TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplug
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
+Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+```
+[plugins]
+enable_alpha = true
+allow_loading_unsigned_plugins = taosdata-tdengine-datasource
+```
+
### 使用 Grafana
#### 配置数据源
-用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
+用户可以直接通过 localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:

@@ -35,7 +42,7 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde

-* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041
+* Host: TDengine 集群的中任意一台服务器的 IP 地址与 TDengine RESTful 接口的端口号(6041),默认 http://localhost:6041 。
* User:TDengine 用户名。
* Password:TDengine 用户密码。
@@ -64,7 +71,7 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
#### 导入 Dashboard
-在 Grafana 插件目录 /usr/local/taos/connector/grafana/tdengine/dashboard/ 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
+在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件:
@@ -140,13 +147,13 @@ conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","ro
- dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE):将数据框iris写入表test中,overwrite必须设置为false,append必须设为TRUE,且数据框iris要与表test的结构一致。
-- dbGetQuery(conn, "select count(*) from test"):查询语句
+- dbGetQuery(conn, "select count(*) from test"):查询语句。
- dbSendUpdate(conn, "use db"):执行任何非查询sql语句。例如dbSendUpdate(conn, "use db"), 写入数据dbSendUpdate(conn, "insert into t1 values(now, 99)")等。
-- dbReadTable(conn, "test"):读取表test中数据
-- dbDisconnect(conn):关闭连接
-- dbRemoveTable(conn, "test"):删除表test
+- dbReadTable(conn, "test"):读取表test中数据。
+- dbDisconnect(conn):关闭连接。
+- dbRemoveTable(conn, "test"):删除表test。
TDengine客户端暂不支持如下函数:
-- dbExistsTable(conn, "test"):是否存在表test
-- dbListTables(conn):显示连接中的所有表
+- dbExistsTable(conn, "test"):是否存在表test。
+- dbListTables(conn):显示连接中的所有表。
diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index ecc9352ba6bb68743407c9a1013719439dedf218..1f6f84dd1a3e66da5a64d07358d97e6f89bdc8c0 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -12,9 +12,9 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预
**第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】
-**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
+**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
**注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine,请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`);
-**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。
+**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是修改 hosts 文件。
**第二步**:建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口;
@@ -25,7 +25,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预
1. 每个物理节点上执行命令`hostname -f`,查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查);
2. 每个物理节点上执行`ping host`,其中host是其他物理节点的hostname,看能否ping通其它物理节点;如果不能ping通,需要检查网络设置,或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的;
3. 从应用运行的物理节点,ping taosd运行的数据节点,如果无法ping通,应用是无法连接taosd的,请检查应用所在物理节点的DNS设置或hosts文件;
-4. 每个数据节点的End Point就是输出的hostname外加端口号,比如h1.taosdata.com:6030
+4. 每个数据节点的End Point就是输出的hostname外加端口号,比如`h1.taosdata.com:6030`。
**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030,其与集群配置相关参数如下:
@@ -79,13 +79,13 @@ Query OK, 1 row(s) in set (0.006385s)
taos>
```
-上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEP。
+上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEp。
## 启动后续数据节点
将后续的数据节点添加到现有集群,具体有以下几步:
-1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030)
+1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEp 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030)
2. 在第一个数据节点,使用CLI程序taos,登录进TDengine系统,执行命令:
@@ -110,7 +110,7 @@ taos>
**提示:**
-- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEP。
+- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。
- firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。
- 接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。
- 两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。
@@ -119,9 +119,14 @@ taos>
上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。
+
+**提示:**
+
+- 以下所有执行命令的操作需要先登录进TDengine系统,必要时请使用root权限。
+
### 添加数据节点
-执行CLI程序taos,使用root账号登录进系统,执行:
+执行CLI程序taos,执行:
```
CREATE DNODE "fqdn:port";
@@ -131,7 +136,7 @@ CREATE DNODE "fqdn:port";
### 删除数据节点
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,执行:
```mysql
DROP DNODE "fqdn:port | dnodeID";
@@ -153,7 +158,7 @@ DROP DNODE "fqdn:port | dnodeID";
手动将某个vnode迁移到指定的dnode。
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,执行:
```mysql
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
@@ -169,7 +174,7 @@ ALTER DNODE BALANCE "VNODE:-DNODE:";
### 查看数据节点
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,执行:
```mysql
SHOW DNODES;
```
@@ -180,8 +185,9 @@ SHOW DNODES;
为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的分配是mnode根据当前系统资源的情况自动进行的,无需任何人工干预。
-执行CLI程序taos,使用root账号登录进TDengine系统,执行:
+执行CLI程序taos,执行:
```mysql
+USE SOME_DATABASE;
SHOW VGROUPS;
```
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index f8634c33e191090c52ca99f8d5262384c6f2b72b..d26cd3c845527084612d1a876076838f5d0f9f1a 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -73,7 +73,7 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
因为 TDengine 具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。
-**立即计算 CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)**
+**立即计算 CPU、内存、存储,请参见:[资源估算方法](https://www.taosdata.com/config/config.html)。**
## 容错和灾备
@@ -216,8 +216,8 @@ taosd -C
| 98 | maxBinaryDisplayWidth | | **C** | | Taos shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏 | 5 - | 30 | 实际上限按以下规则计算:如果字段值的长度大于 maxBinaryDisplayWidth,则显示上限为 **字段名长度** 和 **maxBinaryDisplayWidth** 的较大者。否则,上限为 **字段名长度** 和 **字段值长度** 的较大者。可在 shell 中通过命令 set max_binary_display_width nn动态修改此选项 |
| 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。(2.0.15 以前的版本中,此参数的单位是字节) |
| 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 |
-| 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 |
-| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。 | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
+| 101 | update | | **S** | | 允许更新已存在的数据行 | 0:不允许更新;1:允许整行更新;2:允许部分列更新。(2.1.7.0 版本开始此参数支持设为 2,在此之前取值只能是 [0, 1]) | 0 | 2.0.8.0 版本之前,不支持此参数。 |
+| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。(2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1]) | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
| 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | |
| 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 |
@@ -230,7 +230,7 @@ taosd -C
| 1 | days | 天 | 一个数据文件存储数据的时间跨度 | | 10 |
| 2 | keep | 天 | (可通过 alter database 修改)数据库中数据保留的天数。 | 3650 |
| 3 | cache | MB | 内存块的大小 | | 16 |
-| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache * blocks)。 | | 4 |
+| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache * blocks)。 | | 6 |
| 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 |
| 6 | minRows | | 文件块中记录的最小条数 | | 100 |
| 7 | maxRows | | 文件块中记录的最大条数 | | 4096 |
@@ -239,7 +239,7 @@ taosd -C
| 10 | fsync | 毫秒 | 当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。 | | 3000 |
| 11 | replica | | (可通过 alter database 修改)副本个数 | 1-3 | 1 |
| 12 | precision | | 时间戳精度标识(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)(从 2.1.5.0 版本开始,新增对纳秒时间精度的支持) | ms 表示毫秒,us 表示微秒,ns 表示纳秒 | ms |
-| 13 | update | | 是否允许更新 | 0:不允许;1:允许 | 0 |
+| 13 | update | | 是否允许数据更新(从 2.1.7.0 版本开始此参数支持 0~2 的取值范围,在此之前取值只能是 [0, 1];而 2.0.8.0 之前的版本在 SQL 指令中不支持此参数。) | 0:不允许;1:允许更新整行;2:允许部分列更新。 | 0 |
| 14 | cacheLast | | (可通过 alter database 修改)是否在内存中缓存子表的最近数据(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能 | 0 |
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
@@ -375,7 +375,7 @@ taos -C 或 taos --dump-config
timezone GMT-8
timezone Asia/Shanghai
```
- 均是合法的设置东八区时区的格式。
+ 均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如:
```sql
@@ -433,7 +433,7 @@ SHOW USERS;
显示所有用户
-**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身
+**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身。
## 数据导入
@@ -445,7 +445,7 @@ TDengine的shell支持source filename命令,用于批量运行文件中的SQL
**按数据文件导入**
-TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同, 在导入的时候,其语法如下
+TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同,在导入的时候,其语法如下:
```mysql
insert into tb1 file 'path/data.csv';
@@ -487,7 +487,7 @@ Query OK, 9 row(s) affected (0.004763s)
**taosdump工具导入**
-TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)
+TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)。
## 数据导出
@@ -568,6 +568,35 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会
需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。
+
+## 浮点数有损压缩
+
+在车联网等物联网智能应用场景中,经常会采集和存储海量的浮点数类型数据,如果能更高效地对此类数据进行压缩,那么不但能够节省数据存储的硬件资源,也能够因降低磁盘 I/O 数据量而提升系统性能表现。
+
+从 2.1.6.0 版本开始,TDengine 提供一种名为 TSZ 的新型数据压缩算法,无论设置为有损压缩还是无损压缩,都能够显著提升浮点数类型数据的压缩率表现。目前该功能以可选模块的方式进行发布,可以通过添加特定的编译参数来启用该功能(也即常规安装包中暂未包含该功能)。
+
+**需要注意的是,该功能一旦启用,效果是全局的,也即会对系统中所有的 FLOAT、DOUBLE 类型的数据生效。同时,在启用了浮点数有损压缩功能后写入的数据,也无法被未启用该功能的版本载入,并有可能因此而导致数据库服务报错退出。**
+
+### 创建支持 TSZ 压缩算法的 TDengine 版本
+
+TSZ 模块保存在单独的代码仓库 https://github.com/taosdata/TSZ 中。可以通过以下步骤创建包含此模块的 TDengine 版本:
+1. TDengine 中的插件目前只支持通过 SSH 的方式拉取和编译,所以需要自己先配置好通过 SSH 拉取 GitHub 代码的环境。
+2. `git clone git@github.com:taosdata/TDengine -b your_branchname --recurse-submodules` 通过 `--recurse-submodules` 使依赖模块的源代码可以被一并下载。
+3. `mkdir debug && cd debug` 进入单独的编译目录。
+4. `cmake .. -DTSZ_ENABLED=true` 其中参数 `-DTSZ_ENABLED=true` 表示在编译过程中加入对 TSZ 插件功能的支持。如果成功激活对 TSZ 模块的编译,那么 CMAKE 过程中也会显示 `build with TSZ enabled` 字样。
+5. 编译成功后,包含 TSZ 浮点压缩功能的插件便已编译进 TDengine,之后即可通过调整 taos.cfg 中的配置参数来使用此功能。
+
+### 通过配置文件来启用 TSZ 压缩算法
+
+如果要启用 TSZ 压缩算法,除了在 TDengine 的编译过程需要声明启用 TSZ 模块之外,还需要在 taos.cfg 配置文件中对以下参数进行设置:
+* lossyColumns:配置要进行有损压缩的浮点数数据类型。参数值类型为字符串,含义为:空 - 关闭有损压缩;float - 只对 FLOAT 类型进行有损压缩;double - 只对 DOUBLE 类型进行有损压缩;float|double:对 FLOAT 和 DOUBLE 类型都进行有损压缩。默认值是“空”,也即关闭有损压缩。
+* fPrecision:设置 float 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 FLOAT,最小值为 0.0,最大值为 100,000.0。缺省值为 0.00000001(1E-8)。
+* dPrecision:设置 double 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 DOUBLE,最小值为 0.0,最大值为 100,000.0。缺省值为 0.0000000000000001(1E-16)。
+* maxRange:表示数据的最大浮动范围。一般无需调整,在数据具有特定特征时可以配合 range 参数来实现极高的数据压缩率。默认值为 500。
+* range:表示数据大体浮动范围。一般无需调整,在数据具有特定特征时可以配合 maxRange 参数来实现极高的数据压缩率。默认值为 100。
+
+**注意:**对 cfg 配置文件中参数值的任何调整,都需要重新启动 taosd 才能生效。并且以上选项为全局配置选项,配置后对所有数据库中所有表的 FLOAT 及 DOUBLE 类型的字段生效。
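+
+下面给出一个 taos.cfg 配置片段的示意(各参数取值仅为示例,实际精度阈值需结合数据特征,在压缩率与精度损失之间权衡确定):
+
+```
+# 对 FLOAT 和 DOUBLE 类型都启用 TSZ 有损压缩
+lossyColumns    float|double
+# FLOAT 类型的压缩精度(示例值即缺省值)
+fPrecision      0.00000001
+# DOUBLE 类型的压缩精度(示例值即缺省值)
+dPrecision      0.0000000000000001
+```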
+
## 文件目录结构
安装TDengine后,默认会在操作系统中生成下列目录或文件:
@@ -627,7 +656,7 @@ Active: inactive (dead)
......
```
-卸载 TDengine,只需要执行如下命令
+卸载 TDengine,只需要执行如下命令:
```
rmtaos
```
@@ -652,7 +681,7 @@ rmtaos
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
- 表的列名:不能包含特殊字符,不能超过 64 个字符
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
-- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳
+- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte
- 数据库副本数:不能超过 3
@@ -665,7 +694,7 @@ rmtaos
- 库的个数:仅受节点个数限制
- 单个库上虚拟节点个数:不能超过 64 个
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
-- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
+- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
@@ -724,7 +753,7 @@ rmtaos
2. 服务端命令行输入:`taos -n server -P <port>` 以服务端身份启动对端口 port 为基准端口的监听
3. 客户端命令行输入:`taos -n client -h <fqdn of server> -P <port>` 以客户端身份启动对指定的服务器、指定的端口发送测试包
-服务端运行正常的话会输出以下信息
+服务端运行正常的话会输出以下信息:
```bash
# taos -n server -P 6000
taos -n sync -P 6042 -h <fqdn of server>
用来诊断 sync 端口是否工作正常,判断服务端 sync 模块是否成功工作。另外,-P 6042 用来诊断 arbitrator 是否配置正常,判断指定服务器的 arbitrator 是否能正常工作。
+#### 网络速度诊断
+
+`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
+
+从 2.2.0.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
+
+- -n:设为“speed”时,表示对网络速度进行诊断。
+- -h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
+- -P:所连接服务端的网络端口。默认值为 6030。
+- -N:诊断过程中使用的网络包总数。最小值是 1、最大值是 10000,默认值为 100。
+- -l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024*1024*1024,默认值为 1000。
+- -S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。
+
+#### FQDN 解析速度诊断
+
+`taos -n fqdn -h <fqdn>`
+
+从 2.2.0.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
+
+- -n:设为“fqdn”时,表示对 FQDN 解析进行诊断。
+- -h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
+
#### 服务端日志
taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。
diff --git a/documentation20/cn/12.taos-sql/02.udf/docs.md b/documentation20/cn/12.taos-sql/02.udf/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..cced65db802d9a589602fe9371b0468605cb4819
--- /dev/null
+++ b/documentation20/cn/12.taos-sql/02.udf/docs.md
@@ -0,0 +1,147 @@
+# UDF(用户定义函数)
+
+在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。
+
+从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。
+
+## 用 C/C++ 语言来定义 UDF
+
+TDengine 提供 3 个 UDF 的源代码示例,分别为:
+* [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c)
+* [abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c)
+* [sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c)
+
+### 无需中间变量的标量函数
+
+[add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。
+
+这一具体的处理逻辑在函数 `void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` 中定义。这类用于实现 UDF 的基础计算逻辑的函数,我们称为 udfNormalFunc,也就是对行数据块的标量计算函数。需要注意的是,udfNormalFunc 的参数项是固定的,用于按照约束完成与引擎之间的数据交换。
+
+- udfNormalFunc 中各参数的具体含义是:
+ * data:存有输入的数据。
+ * itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](https://www.taosdata.com/cn/documentation/connector#column_meta)。例如 4 用于表示 INT 型。
+ * ibytes:输入数据中每个值会占用的字节数。
+ * numOfRows:输入数据的总行数。
+ * ts:主键时间戳在输入中的列数据。
+ * dataOutput:输出数据的缓冲区。
+ * interBuf:系统使用的中间临时缓冲区,通常用户逻辑无需对 interBuf 进行处理。
+ * tsOutput:主键时间戳在输出时的列数据。
+ * numOfOutput:输出数据的个数。
+ * otype:输出数据的类型。取值含义与 itype 参数一致。
+ * obytes:输出数据中每个值会占用的字节数。
+ * buf:计算过程的中间变量缓冲区。
+
+其中 buf 参数需要用到一个自定义结构体 SUdfInit。在这个例子中,因为 add_one 的计算过程无需用到中间变量缓存,所以可以把 SUdfInit 定义成一个空结构体。
+
+### 无需中间变量的聚合函数
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。
+
+其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`),再将每个数据块的计算结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成每个子表的聚合结果。如果查询指令涉及超级表,那么最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把子表的计算结果聚合为超级表的计算结果。
+
+值得注意的是,udfNormalFunc、udfMergeFunc、udfFinalizeFunc 之间,函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名。udfMergeFunc 的函数名后缀 `_merge`、udfFinalizeFunc 的函数名后缀 `_finalize`,是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。
+
+- udfMergeFunc 用于对计算中间结果进行聚合。本例中 udfMergeFunc 对应的实现函数为 `void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是:
+ * data:udfNormalFunc 的输出组合在一起的数据,也就成为了 udfMergeFunc 的输入。
+ * numOfRows:data 中数据的行数。
+ * dataOutput:输出数据的缓冲区。
+ * numOfOutput:输出数据的个数。
+ * buf:计算过程的中间变量缓冲区。
+
+- udfFinalizeFunc 用于对计算结果进行最终聚合。本例中 udfFinalizeFunc 对应的实现函数为 `void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`,其中各参数的具体含义是:
+ * dataOutput:输出数据的缓冲区。对 udfFinalizeFunc 来说,其输入数据也来自于这里。
+ * interBuf:系统使用的中间临时缓冲区,与 udfNormalFunc 中的同名参数含义一致。
+ * numOfOutput:输出数据的个数。
+ * buf:计算过程的中间变量缓冲区。
+
+同样因为 abs_max 的计算过程无需用到中间变量缓存,所以也可以把 SUdfInit 定义成一个空结构体。
+
+### 使用中间变量的聚合函数
+
+[sum_double.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sum_double.c) 也是一个聚合函数,功能是对一组数据求和并输出结果的两倍。
+
+出于功能演示的目的,在这个用户定义函数的实现方法中,用到了中间变量缓冲区 buf。因此,在这个源代码文件中,SUdfInit 就不再是一个空的结构体,而是定义了缓冲区的具体存储内容。
+
+也正是因为用到了中间变量缓冲区,因此就需要对这一缓冲区进行初始化和资源释放。具体来说,也即对应 udfInitFunc(本例中,其实际的函数名是 `sum_double_init`)和 udfDestroyFunc(本例中,其实际的函数名是 `sum_double_destroy`)。其函数名命名规则同样是采取以 udfNormalFunc 的实际函数名为前缀,以 `_init` 和 `_destroy` 为后缀。系统会在初始化和资源释放时调用对应名称的函数。
+
+- udfInitFunc 用于初始化中间变量缓冲区中的变量和内容。本例中 udfInitFunc 对应的实现函数为 `int sum_double_init(SUdfInit* buf)`,其中各参数的具体含义是:
+ * buf:计算过程的中间变量缓冲区。
+
+- udfDestroyFunc 用于释放中间变量缓冲区中的变量和内容。本例中 udfDestroyFunc 对应的实现函数为 `void sum_double_destroy(SUdfInit* buf)`,其中各参数的具体含义是:
+ * buf:计算过程的中间变量缓冲区。
+
+注意,UDF 的实现过程中需要小心处理对中间变量缓冲区的使用,如果使用不当则有可能导致内存泄露或对资源的过度占用,甚至导致系统服务进程崩溃等。
+
+### UDF 实现方式的规则总结
+
+根据所要实现的 UDF 类型不同,用户所要实现的功能函数内容也会有所区别:
+* 无需中间变量的标量函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc。
+* 无需中间变量的聚合函数:结构体 SUdfInit 可以为空,需实现 udfNormalFunc、udfMergeFunc、udfFinalizeFunc。
+* 使用中间变量的标量函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc。
+* 使用中间变量的聚合函数:结构体 SUdfInit 需要具体定义,并需实现 udfNormalFunc、udfInitFunc、udfDestroyFunc、udfMergeFunc、udfFinalizeFunc。
+
+## 编译 UDF
+
+用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 .so 链接库,之后才能载入 TDengine 系统。
+
+例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,那么可以执行如下指令编译得到动态链接库文件:
+```bash
+gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
+```
+
+这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。
+
+## 在系统中管理和使用 UDF
+
+### 创建 UDF
+
+用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
+
+在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。
+
+- 创建标量函数:`CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize B;`
+ * ids(X):标量函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
+ * ids(Y):包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
+ * typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可;
+ * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。
+
+ 例如,如下语句可以把 add_one.so 创建为系统中可用的 UDF:
+ ```sql
+ CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT bufsize 128;
+ ```
+
+- 创建聚合函数:`CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize B;`
+ * ids(X):聚合函数未来在 SQL 指令中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
+ * ids(Y):包含 UDF 函数实现的动态链接库的库文件路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
+ * typename(Z):此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可;
+ * B:系统使用的中间临时缓冲区大小,单位是字节,最小 0,最大 512,通常可以设置为 128。
+
+ 例如,如下语句可以把 abs_max.so 创建为系统中可用的 UDF:
+ ```sql
+ CREATE AGGREGATE FUNCTION abs_max AS "/home/taos/udf_example/abs_max.so" OUTPUTTYPE BIGINT bufsize 128;
+ ```
+
+### 管理 UDF
+
+- 删除指定名称的用户定义函数:`DROP FUNCTION ids(X);`
+ * ids(X):此参数的含义与 CREATE 指令中的 ids(X) 参数一致,也即要删除的函数的名字,例如 `DROP FUNCTION add_one;`。
+- 显示系统中当前可用的所有 UDF:`SHOW FUNCTIONS;`
+
+### 调用 UDF
+
+在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
+```sql
+SELECT X(c) FROM table/stable;
+```
+
+表示对名为 c 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
+
+## UDF 的一些使用限制
+
+在当前版本下,使用 UDF 存在如下这些限制:
+1. 在创建和调用 UDF 时,服务端和客户端都只支持 Linux 操作系统;
+2. UDF 不能与系统内建的 SQL 函数混合使用;
+3. UDF 只支持以单个数据列作为输入;
+4. UDF 只要创建成功,就会被持久化存储到 MNode 节点中;
+5. 无法通过 RESTful 接口来创建 UDF;
+6. UDF 在 SQL 中定义的函数名,必须与 .so 库文件实现中的接口函数名前缀保持一致,也即必须是 udfNormalFunc 的名称,而且不可与 TDengine 中已有的内建 SQL 函数重名。
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 6c4bc7f5030153069cb9c177449a68cd11a4c521..dabbb3d2af598c84f6c55f921d524cb9ddccb83b 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -9,7 +9,7 @@ TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。
本章节 SQL 语法遵循如下约定:
- < > 里的内容是用户需要输入的,但不要输入 <> 本身
-- [ ] 表示内容为可选项,但不能输入 [] 本身
+- \[ \] 表示内容为可选项,但不能输入 [] 本身
- | 表示多选一,选择其中一个即可,但不能输入 | 本身
- … 表示前面的项可重复多个
@@ -35,7 +35,7 @@ taos> DESCRIBE meters;
- 内部函数 now 是客户端的当前时间
- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度的逻辑也是类似的。)
-- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
+- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。
TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度)
@@ -70,7 +70,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
- 2) UPDATE 标志数据库支持更新相同时间戳数据;
+ 2) UPDATE 标志数据库支持更新相同时间戳数据;(从 2.1.7.0 版本开始此参数支持设为 2,表示允许部分列更新,也即更新数据行时未被设置的列会保留原值。)(从 2.0.8.0 版本开始支持此参数。注意此参数不能通过 `ALTER DATABASE` 指令进行修改。)
3) 数据库名最大长度为33;
@@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
显示当前数据库下的所有数据表信息。
- 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
-
- 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。
-
- **显示一个数据表的创建语句**
```mysql
@@ -237,7 +233,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
```
说明:
- 1) 列的最大个数为1024,最小个数为2;
+ 1) 列的最大个数为1024,最小个数为2;(从 2.1.7.0 版本开始,改为最多允许 4096 列)
2) 列名最大长度为64。
@@ -265,7 +261,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
```
- 创建 STable,与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型
+ 创建 STable,与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型。
说明:
@@ -577,16 +573,24 @@ Query OK, 2 row(s) in set (0.003112s)
注意:普通表的通配符 * 中并不包含 _标签列_。
-##### 获取标签列的去重取值
+#### 获取标签列或普通列的去重取值
-从 2.0.15 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。
-```mysql
-SELECT DISTINCT tag_name FROM stb_name;
+从 2.0.15.0 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。注意,在 2.1.6.0 版本之前,DISTINCT 只支持处理单个标签列,而从 2.1.6.0 版本开始,DISTINCT 可以对多个标签列进行处理,输出这些标签列取值不重复的组合。
+```sql
+SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
```
-注意:目前 DISTINCT 关键字只支持对超级表的标签列进行去重,而不能用于普通列。
+从 2.1.7.0 版本开始,DISTINCT 也支持对数据子表或普通表进行处理,也即支持获取单个普通列的不重复取值,或多个普通列取值的不重复组合。
+```sql
+SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
+```
+需要注意的是,DISTINCT 目前不支持对超级表中的普通列进行处理。如果需要进行此类操作,那么需要把超级表放在子查询中,再对子查询的计算结果执行 DISTINCT。
+说明:
+1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。
+2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。
+3. 在当前版本下,DISTINCT 不能在嵌套查询的子查询中使用,也不能与聚合函数、GROUP BY、或 JOIN 在同一条语句中混用。
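+
+下面给出一个使用示意(其中超级表 meters 及其标签列 location、groupid、普通列 current 均为假设的示例名称):
+
+```sql
+-- 输出多个标签列取值的不重复组合(2.1.6.0 及以上版本)
+SELECT DISTINCT location, groupid FROM meters;
+
+-- 对超级表的普通列做去重,需要先把超级表放进子查询中
+SELECT DISTINCT c FROM (SELECT current AS c FROM meters);
+```
+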
#### 结果集列名
@@ -717,31 +721,79 @@ Query OK, 1 row(s) in set (0.001091s)
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
+| is [not] null | is null or is not null | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
-| in | matches any value in a set | all types except first column `timestamp` |
-| % | match with any char sequences | **`binary`** **`nchar`** |
-| _ | match with a single char | **`binary`** **`nchar`** |
+| in | match any value in a set | all types except first column `timestamp` |
+| like | match a wildcard string | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
-2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
-3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
-4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+2. like 算子使用通配符字符串进行匹配检查。
+ * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。
+ * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
+ * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
+3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
+ * 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
+4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
+ * 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
+5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+
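+下面给出几个查询过滤条件的使用示意(其中表名 meters 及列名 location、city、current 均为假设的示例名称):
+
+```sql
+-- like 通配符:% 匹配 0 到任意个字符,_ 匹配单个任意字符
+SELECT * FROM meters WHERE location LIKE 'beijing%';
+-- 匹配字符串中原本就带有的下划线时需要转义(2.2.0.0 及以上版本)
+SELECT * FROM meters WHERE location LIKE 'beijing\_chaoyang';
+-- BETWEEN AND(2.0.17.0 及以上版本)与 IN(2.1.4.0 及以上版本)算子
+SELECT * FROM meters WHERE current BETWEEN 1.5 AND 3.25;
+SELECT * FROM meters WHERE city IN ('Beijing', 'Shanghai');
+```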
+
+### JOIN 子句
+
+从 2.2.0.0 版本开始,TDengine 对内连接(INNER JOIN)中的自然连接(Natural join)操作实现了完整的支持。也即支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间”进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。
+
+在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如:
+```sql
+SELECT *
+FROM temp_tb_1 t1, pressure_tb_1 t2
+WHERE t1.ts = t2.ts
+```
+
+在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如:
+```sql
+SELECT *
+FROM temp_stable t1, temp_stable t2
+WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
+```
+
+类似地,也可以对多个子查询的查询结果进行 JOIN 操作。
-
+
+说明:
+1. 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
+2. 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
+3. 目前不能在“连续查询”功能中使用子查询。
+4. 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
+5. 目前内层查询、外层查询均不支持 UNION 操作。
+6. 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
+ * 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
+7. 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
+ * 计算函数部分:
+ 1. 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
+ 2. 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。此类函数包括:STDDEV, PERCENTILE。
+ * 外层查询中不支持 IN 算子,但在内层中可以使用。
+ * 外层查询不支持 GROUP BY。
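+
+下面给出一个嵌套查询的使用示意(其中超级表 meters 与普通列 current 均为假设的示例名称):
+
+```sql
+-- 内层查询先按子表求出各自的最大电流,外层查询再对这些结果求平均
+SELECT AVG(mc) FROM (SELECT MAX(current) AS mc FROM meters GROUP BY tbname);
+```
+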
-### UNION ALL 操作符
+### UNION ALL 子句
```mysql
SELECT ...
@@ -1025,9 +1077,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*);
- 2) 如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;
+ 2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;
- 3) 如果结果集中所有列全部为NULL值,则不返回结果。
+ 3)如果结果集中所有列全部为NULL值,则不返回结果。
示例:
```mysql
@@ -1048,7 +1100,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
- 功能说明:统计表/超级表中某列的值最后写入的非NULL值。
+ 功能说明:统计表/超级表中某列的值最后写入的非 NULL 值。
返回结果数据类型:同应用的字段。
@@ -1058,9 +1110,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
说明:
- 1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*);
+ 1)如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*);
+
+ 2)如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。
- 2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。
+ 3)在用于超级表时,如果时间戳完全一样且同为最大的数据行有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
示例:
```mysql
@@ -1187,7 +1241,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
- 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
+ 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数。
```mysql
taos> SELECT APERCENTILE(current, 20) FROM d1001;
@@ -1209,9 +1263,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
- 说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
+ 限制:LAST_ROW() 不能与 INTERVAL 一起使用。
- 限制:LAST_ROW()不能与INTERVAL一起使用。
+ 说明:在用于超级表时,如果时间戳完全一样且同为最大的数据行有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
示例:
```mysql
@@ -1230,33 +1284,56 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
- **INTERP**
```mysql
- SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR})];
+ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
功能说明:返回表/超级表的指定时间截面、指定字段的记录。
- 返回结果数据类型:同应用的字段。
+ 返回结果数据类型:同字段类型。
- 应用字段:所有字段。
+ 应用字段:数值型字段。
适用于:**表、超级表**。
- 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。其中,条件语句里面可以附带更多的筛选条件,例如标签、tbname。
+ 说明:(从 2.0.15.0 版本开始新增此函数)
+
+ 1)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。
+
+ 2)INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。
- 限制:INTERP 目前不支持 FILL(NEXT)。
+ 3)单个 INTERP 函数查询只能够针对一个时间点进行查询,如果需要返回等时间间隔的断面数据,可以通过 INTERP 配合 EVERY 的方式来进行查询处理(而不是使用 INTERVAL),其含义是每隔固定长度的时间进行插值。
示例:
- ```mysql
- taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev);
- interp(ts) | interp(f1) | interp(f2) | interp(f3) |
- ====================================================================
- 2017-07-14 10:42:00.005 | 5 | 9 | 6 |
- Query OK, 1 row(s) in set (0.002912s)
+ ```sql
+ taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004';
+ interp(ts) | interp(current) | interp(voltage) | interp(phase) |
+ ==========================================================================================
+ 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 |
+ Query OK, 1 row(s) in set (0.002652s)
+ ```
+
+ 如果给定的时间戳无对应的数据,在不指定插值生成策略的情况下,不会返回结果;如果指定了插值策略,则会根据插值策略返回结果。
+
+ ```sql
+ taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005';
+ Query OK, 0 row(s) in set (0.004022s)
+
+ taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV);
+ interp(ts) | interp(current) | interp(voltage) | interp(phase) |
+ ==========================================================================================
+ 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 |
+ Query OK, 1 row(s) in set (0.003056s)
+ ```
+
+ 如下所示的查询表示在时间区间 `['2017-7-14 18:40:00', '2017-7-14 18:40:00.014']` 中每隔 5 毫秒进行一次断面计算。
+
+ ```sql
+ taos> SELECT INTERP(current) FROM d636 WHERE ts>='2017-7-14 18:40:00' AND ts<='2017-7-14 18:40:00.014' EVERY(5a);
+ ts | interp(current) |
+ =================================================
+ 2017-07-14 18:40:00.000 | 10.04179 |
+ 2017-07-14 18:40:00.010 | 10.16123 |
+ Query OK, 2 row(s) in set (0.003487s)
- taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev);
- interp(ts) | interp(f1) | interp(f2) | interp(f3) |
- ====================================================================
- 2017-07-14 10:42:00.005 | 5 | 6 | 7 |
- Query OK, 1 row(s) in set (0.002005s)
```
### 计算函数
@@ -1299,6 +1376,19 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
说明:(从 2.1.3.0 版本开始新增此函数)输出结果行数是范围内总行数减一,第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
+ 示例:
+ ```mysql
+ taos> select derivative(current, 10m, 0) from t1;
+ ts | derivative(current, 10m, 0) |
+ ========================================================
+ 2021-08-20 10:11:22.790 | 0.500000000 |
+ 2021-08-20 11:11:22.791 | 0.166666620 |
+ 2021-08-20 12:11:22.791 | 0.000000000 |
+ 2021-08-20 13:11:22.792 | 0.166666620 |
+ 2021-08-20 14:11:22.792 | -0.666666667 |
+ Query OK, 5 row(s) in set (0.004883s)
+ ```
+
- **SPREAD**
```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
@@ -1371,8 +1461,6 @@ SELECT function_list FROM tb_name
SELECT function_list FROM stb_name
[WHERE where_condition]
- [SESSION(ts_col, tol_val)]
- [STATE_WINDOW(col)]
[INTERVAL(interval [, offset]) [SLIDING sliding]]
[FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
@@ -1383,8 +1471,8 @@ SELECT function_list FROM stb_name
1. 时间窗口:聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
* 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u),当然如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。
* **注意:**用到 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。
- 2. 状态窗口:使用整数或布尔值来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。
- 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。
+ 2. 状态窗口:使用整数或布尔值来标识产生记录时设备的状态量,产生的记录如果具有相同的状态量取值则归属于同一个状态窗口,数值改变后该窗口关闭。状态量所对应的列作为 STATE_WINDOW 语句的参数来指定。(状态窗口暂不支持对超级表使用)
+ 3. 会话窗口:时间戳所在的列由 SESSION 语句的 ts_col 参数指定,会话窗口根据相邻两条记录的时间戳差值来确定是否属于同一个会话——如果时间戳差异在 tol_val 以内,则认为记录仍属于同一个窗口;如果时间变化超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
- FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
1. 不进行填充:NONE(默认填充模式)。
@@ -1418,25 +1506,21 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
## TAOS SQL 边界限制
-- 数据库名最大长度为 32
-- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
-- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳
-- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符
-- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M
-- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
-- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
+- 数据库名最大长度为 32。
+- 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
+- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
+- 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。
+- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。
+- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列)
+- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
-## TAOS SQL其他约定
+## TAOS SQL 其他约定
**GROUP BY的限制**
-TAOS SQL支持对标签、TBNAME进行GROUP BY操作,也支持普通列进行GROUP BY,前提是:仅限一列且该列的唯一值小于10万个。
-
-**JOIN操作的限制**
-
-TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。
+TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进行 GROUP BY,前提是:仅限一列且该列的唯一值小于 10 万个。
-**IS NOT NULL与不为空的表达式适用范围**
+**IS NOT NULL 与不为空的表达式适用范围**
-IS NOT NULL支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
+IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
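+
+下面给出一个使用示意(其中表 t1 及其 NCHAR 类型列 name、INT 类型列 val 均为假设的示例名称):
+
+```sql
+SELECT * FROM t1 WHERE val IS NOT NULL;   -- IS NOT NULL 适用于所有类型的列
+SELECT * FROM t1 WHERE name <> "";        -- 不为空表达式仅适用于非数值类型的列
+```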
diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index 300ff27fe457fe50c78a4b5090ec20ea8edd8957..14599079b7c5bf99d736b34504cf59f1112900b0 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -26,15 +26,15 @@
## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
## 3. 创建数据表时提示more dnodes are needed
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
## 4. 如何让TDengine crash时生成core文件?
-请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)
+请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
## 5. 遇到错误“Unable to establish connection”, 我怎么办?
@@ -49,7 +49,7 @@
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
-4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
+4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name —— 可在服务器上执行Linux命令hostname -f获得),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群,客户端需要能ping通所有集群节点的FQDN。
@@ -74,16 +74,16 @@
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
-1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
-2. 如果网络配置有DNS server, 请检查是否正常工作
-3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。
+1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
+2. 如果网络配置有DNS server,请检查是否正常工作
+3. 如果网络没有配置DNS server,请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址
4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的
## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
如果你确认语法正确,2.0之前版本,请检查SQL语句长度是否超过64K。如果超过,也会返回这个错误。
-## 8. 是否支持validation queries?
+## 8. 是否支持validation queries?
TDengine还没有一组专用的validation queries。然而建议你使用系统监测的数据库"log"来做。
@@ -96,9 +96,11 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支
另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。
+此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。
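+
+下面通过一个示意来说明两者的差别(假设库中已建有含 ts、c1、c2 三列的表 t1,名称仅为示例):
+
+```sql
+CREATE DATABASE power UPDATE 2;   -- 2.1.7.0 及以上版本支持设为 2
+-- 先写入一行完整数据,再以相同时间戳只提供 c1 列的取值:
+INSERT INTO t1 VALUES ('2021-08-01 00:00:00', 10, 20);
+INSERT INTO t1 (ts, c1) VALUES ('2021-08-01 00:00:00', 11);
+-- UPDATE 为 2 时,c2 保留原值 20;若 UPDATE 为 1,则 c2 会被置为 NULL
+```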
+
## 10. 我怎么创建超过1024列的表?
-使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。
+使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。)
## 11. 最有效的写入数据的方法是什么?
@@ -137,7 +139,7 @@ Connection = DriverManager.getConnection(url, properties);
TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事:
-- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname
+- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname。
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
index a10c22ee622fe71f4215c981774b637fc7c177d9..258b2f718feb87a2fa8d92b17a403919ac2e8f56 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -1,37 +1,50 @@
# TDengine Documentation
-TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
+TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper. For the older TDengine version 1.6 documentation, please click [here](https://www.taosdata.com/en/documentation16/).
## [TDengine Introduction](/evaluation)
* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenes](/evaluation#scenes)
-* [TDengine Performance Metrics and Verification]((/evaluation#))
+* [TDengine Performance Metrics and Verification](/evaluation#)
## [Getting Started](/getting-started)
-* [Quickly Install](/getting-started#install): install via source code/package / Docker within seconds
-
-- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
-- [Command-line](/getting-started#console) : an easy way to access TDengine server
-- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
-- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
-- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html):a detailed guide for TDengine deployment in Kubernetes environment
+* [Quick Install](/getting-started#install): install via source code / package / Docker within seconds
+* [Quick Launch](/getting-started#start): start / stop TDengine quickly with systemctl
+* [Command-line](/getting-started#console): an easy way to access TDengine server
+* [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
+* [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
+* [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in Kubernetes environment
## [Overall Architecture](/architecture)
-- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
-- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
-- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
-- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement is sent back to client, while supporting multi-replicas
+- [Data Model](/architecture#model): relational database model, but one table for one data collection point with static tags
+- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL architecture, high availability and horizontal scalability
+- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time
+- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replications
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
-- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
+- [Data Query](/architecture#query): support various SQL functions, downsampling, interpolation, and multi-table aggregation
## [Data Modeling](/model)
-- [Create a Database](/model#create-db): create a database for all data collection points with similar features
+- [Create a Database](/model#create-db): create a database for all data collection points with similar data characteristics
- [Create a Super Table(STable)](/model#create-stable): create a STable for all data collection points with the same type
-- [Create a Table](/model#create-table): use STable as the template, to create a table for each data collecting point
+- [Create a Table](/model#create-table): use STable as the template to create a table for each data collecting point
+
+## [Efficient Data Ingestion](/insert)
+
+- [Data Writing via SQL](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
+- [Data Writing via Prometheus](/insert#prometheus): Configure Prometheus to write data directly without any code
+- [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code
+- [Data Writing via EMQ X](/insert#emq): Configure EMQ X to write MQTT data directly without any code
+- [Data Writing via HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
+
+## [Efficient Data Querying](/queries)
+
+- [Major Features](/queries#queries): support various standard query functions, setting filter conditions, and querying per time segment
+- [Multi-table Aggregation](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation
+- [Downsampling](/queries#sampling): aggregate data in successive time windows, support interpolation
## [TAOS SQL](/taos-sql)
@@ -40,27 +53,13 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
-- [Inserting Records](/taos-sql#insert): support to write single/multiple items per table, multiple items across tables, and support to write historical data
+- [Inserting Records](/taos-sql#insert): write single/multiple records into a table, multiple records across tables, and historical data
- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
-- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
+- [Cutting and Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
- [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others
- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes
-## [Efficient Data Ingestion](/insert)
-
-- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
-- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code
-- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code
-- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code
-- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
-
-## [Efficient Data Querying](/queries)
-
-- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment
-- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries
-- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation
-
## [Advanced Features](/advanced-features)
- [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals
@@ -71,7 +70,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
-- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API
+- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
@@ -88,12 +87,12 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Installation and Management of TDengine Cluster](/cluster)
-- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
-- [Create Your First Node](/cluster#node-one): simple to follow the quick setup
+- [Preparation](/cluster#prepare): important steps before deploying TDengine for production usage
+- [Create the First Node](/cluster#node-one): just follow the steps in the quick start guide
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
-- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas
-- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention
+- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through replicas
+- [Mnode Management](/cluster#mnode): mnodes are created automatically without any manual intervention
- [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes
- [Offline Node Processing](/cluster#offline): any node that offline for more than a certain period will be removed from the cluster
- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain
@@ -108,27 +107,14 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
-- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
-
-## TDengine Technical Design
-
-- [System Module]: taosd functions and modules partitioning
-- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
-- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
-
-## Common Tools
-
-- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
+- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords
## Performance: TDengine vs Others
-- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
-- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
-- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
-- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
-- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
+- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/09/12/710.html)
+- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/09/12/708.html)
+- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/09/12/706.html)
+- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_en.pdf)
## More on IoT Big Data
@@ -136,7 +122,8 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
+- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
## FAQ
-- [FAQ: Common questions and answers](/faq)
+- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md
index 250f465d7b1280a78e18250f95aefaeca0c95415..5b2d0dd974203db1dafe8758e673a2f0970c3f17 100644
--- a/documentation20/en/01.evaluation/docs.md
+++ b/documentation20/en/01.evaluation/docs.md
@@ -2,21 +2,20 @@
## About TDengine
-TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
+TDengine is an innovative Big Data processing product launched by TAOS Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:
-- **Performance improvement over 10 times**: An innovative data storage structure is defined, with each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general database.
+- **Performance improvement over 10 times**: An innovative data storage structure is defined, with which every single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
-- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
-- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
+- **Highly Available and Horizontally Scalable**: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support mission-critical applications.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+- **Core is Open Sourced:** Except for some auxiliary features, the core of TDengine is open-sourced. Enterprises are no longer locked in by the database vendor. The ecosystem is stronger, products are more stable, and developer communities are more active.
-With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
+With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, since it makes full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.

-
Figure 1. TDengine Technology Ecosystem
## Overall Scenarios of TDengine
@@ -62,4 +61,4 @@ From the perspective of data sources, designers can analyze the applicability of
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
-| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
+| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counseling services. |
diff --git a/documentation20/en/02.getting-started/01.docker/docs.md b/documentation20/en/02.getting-started/01.docker/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..daa89ef1016179e7860e4178c52481aef2760243
--- /dev/null
+++ b/documentation20/en/02.getting-started/01.docker/docs.md
@@ -0,0 +1,243 @@
+# Quickly experience TDengine through Docker
+
+While it is not recommended to deploy TDengine services via Docker in a production environment, Docker does a good job of shielding the environmental differences of the underlying operating system and is well suited for development, testing, or a first-time experience of installing and running TDengine. In particular, Docker makes it relatively easy to try TDengine on Mac OS X and Windows systems without having to install a virtual machine or rent an additional Linux server. In addition, starting from version 2.0.14.0, TDengine provides images that support the X86-64, X86, arm64, and arm32 platforms, so non-mainstream computers that can run Docker, such as NAS devices, Raspberry Pi, and embedded development boards, can also easily experience TDengine based on this document.
+
+The following article explains, step by step, how to quickly build a single-node TDengine runtime environment via Docker to support development and testing.
+
+## Docker download
+
+Docker itself can be downloaded from the [Docker official site](https://docs.docker.com/get-docker/).
+
+After installation, you can check the Docker version in a command line terminal. If the version number is printed properly, the Docker environment has been installed successfully.
+
+```bash
+$ docker -v
+Docker version 20.10.3, build 48d30b5
+```
+
+## Running TDengine in a Docker container
+
+1. Use the following command to pull the TDengine image and run it in the background.
+
+```bash
+$ docker run -d --name tdengine tdengine/tdengine
+7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
+```
+
+- **docker run**: Run a container via Docker.
+- **--name tdengine**: Set the container name; the container can later be referred to by this name.
+- **-d**: Keep the container running in the background.
+- **tdengine/tdengine**: The officially published TDengine application image to pull and run.
+- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**: The long string returned is the container ID; the container can also be referred to by its ID.
+
+2. Verify that the container is running correctly.
+
+```bash
+$ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS ···
+c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
+```
+
+- **docker ps**: List information about all running containers.
+- **CONTAINER ID**: Container ID.
+- **IMAGE**: The image used.
+- **COMMAND**: The command run when starting the container.
+- **CREATED**: The time when the container was created.
+- **STATUS**: The container status. Up means running.
+
+3. Enter the Docker container and use TDengine.
+
+```bash
+$ docker exec -it tdengine /bin/bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
+```
+
+- **docker exec**: Enter the container via the docker exec command; the container will not stop when you exit it.
+- **-i**: Enter interactive mode.
+- **-t**: Allocate a terminal.
+- **c452519b0f9b**: The container ID; replace it with the value returned by the docker ps command.
+- **/bin/bash**: Run bash inside the container to interact with it.
+
+4. After entering the container, execute the taos shell client program.
+
+```bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos>
+```
+
+The TDengine terminal successfully connects to the server and prints out a welcome message and version information. If it fails, an error message is printed.
+
+In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql).
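+
+As a quick illustrative sketch of such a session (the database and table names here are placeholders, not part of any standard setup):
+
+```bash
+taos> CREATE DATABASE demo;
+taos> USE demo;
+taos> CREATE TABLE t (ts TIMESTAMP, speed INT);
+taos> INSERT INTO t VALUES (NOW, 10);
+taos> SELECT * FROM t;
+```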
+
+## Learn more about TDengine with taosdemo
+
+1. Following the above steps, exit the TDengine terminal program first.
+
+```bash
+taos> q
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
+```
+
+2. Execute taosdemo from the command line interface.
+
+```bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
+
+taosdemo is simulating data generated by power equipments monitoring...
+
+host: 127.0.0.1:6030
+user: root
+password: taosdata
+configDir:
+resultFile: ./output.txt
+thread num of insert data: 10
+thread num of create table: 10
+top insert interval: 0
+number of records per req: 30000
+max sql length: 1048576
+database count: 1
+database[0]:
+ database[0] name: test
+ drop: yes
+ replica: 1
+ precision: ms
+ super table count: 1
+ super table[0]:
+ stbName: meters
+ autoCreateTable: no
+ childTblExists: no
+ childTblCount: 10000
+ childTblPrefix: d
+ dataSource: rand
+ iface: taosc
+ insertRows: 10000
+ interlaceRows: 0
+ disorderRange: 1000
+ disorderRatio: 0
+ maxSqlLen: 1048576
+ timeStampStep: 1
+ startTimestamp: 2017-07-14 10:40:00.000
+ sampleFormat:
+ sampleFile:
+ tagsFile:
+ columnCount: 3
+column[0]:FLOAT column[1]:INT column[2]:FLOAT
+ tagCount: 2
+ tag[0]:INT tag[1]:BINARY(16)
+
+ Press enter key to continue or Ctrl-C to stop
+```
+
+After you press enter, this command will automatically create a super table meters under the database test. There are 10,000 tables under this super table, named "d0" to "d9999". Each table has 10,000 records, and each record has four fields (ts, current, voltage, phase). The timestamps range from "2017-07-14 10:40:00.000" to "2017-07-14 10:40:09.999". Each table has the tags location and groupId: groupId is set from 1 to 10 and location is set to "beijing" or "shanghai".
+
+This command takes a few minutes to execute and ends up inserting a total of 100 million records.
+
+3. Go to the TDengine terminal and view the data generated by taosdemo.
+
+- **Go to the terminal interface.**
+
+```bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
+
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos>
+```
+
+- **View the database.**
+
+```bash
+taos> show databases;
+ name | created_time | ntables | vgroups | ···
+ test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
+ log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
+
+```
+
+- **View Super Tables.**
+
+```bash
+taos> use test;
+Database changed.
+
+taos> show stables;
+ name | created_time | columns | tags | tables |
+============================================================================================
+ meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
+Query OK, 1 row(s) in set (0.003259s)
+
+```
+
+- **View the table and limit the output to 10 entries.**
+
+```bash
+taos> select * from test.t0 limit 10;
+
+DB error: Table does not exist (0.002857s)
+taos> select * from test.d0 limit 10;
+ ts | current | voltage | phase |
+======================================================================================
+ 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
+ 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
+ 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
+ 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
+ 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
+ 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
+Query OK, 10 row(s) in set (0.016791s)
+
+```
+
+- **View the tag values for the d0 table.**
+
+```bash
+taos> select groupid, location from test.d0;
+ groupid | location |
+=================================
+ 0 | shanghai |
+Query OK, 1 row(s) in set (0.003490s)
+
+```
+
+## Stop the TDengine service that is running in Docker
+
+```bash
+$ docker stop tdengine
+tdengine
+```
+
+- **docker stop**: Stop the specified running Docker container.
+- **tdengine**: The name of the container.
+
+## Connecting to TDengine in Docker during application development
+
+There are two ways to connect from outside of Docker to a TDengine service running inside a Docker container:
+
+1. By port mapping (-p), the network port opened inside the container is mapped to a specified port of the host. By mounting a local directory (-v), you can synchronize data between the host and the container to prevent data loss after the container is deleted.
+
+```bash
+$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
+526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
+
+$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
+```
+
+- The first command starts a Docker container with TDengine running and maps port 6041 of the container to port 6041 of the host.
+- The second command accesses TDengine through the RESTful interface; it connects to port 6041 on the local machine, so the connection succeeds.
+
+Note: In this example, for convenience, only port 6041 is mapped, which is required for RESTful access. If you wish to connect to the TDengine service in a non-RESTful manner, you will need to map a total of 11 ports starting at 6030. Also, in the example only the /etc/taos directory where the configuration files are located is mounted from the host, not the data storage directory.
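+
+If you do need the native (non-RESTful) connection, a fuller mapping might look like the following sketch, assuming the default serverPort of 6030 and example host paths:
+
+```bash
+$ docker run -d -v /etc/taos:/etc/taos -v /var/lib/taos:/var/lib/taos \
+    -p 6030-6040:6030-6040 -p 6030-6040:6030-6040/udp -p 6041:6041 tdengine/tdengine
+```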
+
+2. Enter the Docker container directly via the exec command to do development. That is, put the program code in the same Docker container where the TDengine server is located and connect to the TDengine service local to the container.
+
+```bash
+$ docker exec -it tdengine /bin/bash
+```
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 3c9d9ac6af54cfd49a4b2700c8c79773f08a2120..50a8c2fabb8c93a847a79a4de47c218de7ccd60a 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -10,15 +10,15 @@ Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo
### Install from Docker Container
-Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
+For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments.
-### Install from Package
+Please refer to [Quickly experience TDengine through Docker](https://www.taosdata.com/en/documentation/getting-started/docker) for detailed operation steps.
-It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
+### Install from Package
-Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
+Three different packages for TDengine server are provided; please pick the one you need. (The Lite package only includes the executables and the C/C++ connector, while the standard package supports connectors for nearly all programming languages.) The beta version has more features, but we suggest installing the stable version for production or testing.
-For more about installation process, please refer [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html), and [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
+Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package.
## Quick Launch
@@ -131,7 +131,7 @@ After starting the TDengine server, you can execute the command `taosdemo` in th
$ taosdemo
```
-Using this command, a STable named `meters` will be created in the database `test` There are 10k tables under this stable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
+Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp is from "2017-07-14 10:40:00.000" to "2017-07-14 10:41:39.999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
@@ -201,7 +201,7 @@ Note: ● has been verified by official tests; ○ has been verified by unoffici
List of platforms supported by TDengine client and connectors
-At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
+At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and operating systems such as Linux/Win64/Win32.
Comparison matrix as following:
@@ -218,4 +218,4 @@ Comparison matrix as following:
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
-Please visit [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.
+Please visit the Connectors section for more detailed information.
\ No newline at end of file
diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
index ce8dd6c8be75ae87afcd51fbbecbaf97a274ba3e..b9e21b1d4c775876c77b2c9ec999639f30bd0c00 100644
--- a/documentation20/en/03.architecture/docs.md
+++ b/documentation20/en/03.architecture/docs.md
@@ -4,7 +4,7 @@
### A Typical IoT Scenario
-In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the collection devices of the same type, there are often many specific collection devices distributed in places. BigData processing system aims to collect all kinds of data, and then calculate and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
+In typical industrial IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collection devices that collect one or more different physical metrics. For data collection devices of the same type, however, there are often many individual devices distributed in the field. A Big Data processing system aims to collect all kinds of data, then store and analyze them. For the same kind of devices, the data collected are highly structured. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
@@ -13,7 +13,6 @@ In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there a
Collected Metrics
Tags
-
Device ID
Time Stamp
@@ -110,165 +109,168 @@ As the data points are a series of data points over time, the data points genera
1. Metrics are always structured data;
2. There are rarely delete/update operations on collected data;
-3. No need for transactions of traditional databases
-4. The ratio of reading is lower but write is higher than typical Internet applications;
-5. data flow is uniform and can be predicted according to the number of devices and collection frequency;
-6. the user pays attention to the trend of data, not a specific value at a specific time;
-7. there is always a data retention policy;
-8. the data query is always executed in a given time range and a subset of space;
-9. in addition to storage and query operations, various statistical and real-time calculation operations are also required;
-10. data volume is huge, a system may generate over 10 billion data points in a day.
+3. Unlike traditional databases, transaction processing is not required;
+4. The ratio of writing over reading is much higher than typical Internet applications;
+5. Data volume is stable and can be predicted according to the number of devices and sampling rate;
+6. The user pays attention to the trend of data, not a specific value at a specific time;
+7. There is always a data retention policy;
+8. The data query is always executed in a given time range and a subset of space;
+9. In addition to storage and query operations, various statistical and real-time computing are also required;
+10. Data volume is huge, a system may generate over 10 billion data points in a day.
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
### Relational Database Model
-Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a shallow learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL’s key-value storage.
+Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process it with a short learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. SQL-like syntax is used, instead of NoSQL’s key-value storage.
-### One Table for One Collection Point
+### One Table for One Data Collection Point
-To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 millions smart meters, means 10 millions tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
+To utilize the time-series and other data features described above, TDengine requires the user to create a table for each data collection point to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
-1. Guarantee that all data from a collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
-2. Since the data generation process of each collection device is completely independent, means each device has its unique data source, thus writes can be carried out in a lock-free manner to greatly improve the speed.
+1. Guarantee that all data from a data collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one data collection point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
+2. Since the data generation process of each data collection device is completely independent and each data collection point has its unique data source, writes can be carried out in a lock-free manner to greatly improve the performance.
3. Write latency can be significantly reduced too as the data points generated by the same device will arrive in time order, the new data point will be simply appended to a block.
-If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to continuously stored together. **The method of one table for each data collection point can ensure the optimal performance of insertion and query of a single data collection point to the greatest extent.**
+If the data of multiple devices are written into one table in the traditional way, then due to the uncontrollable network delay, the order in which data from different devices arrive at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be continuously stored together. **One table for each data collection point can ensure the optimal performance of insert and query of a single data collection point to the greatest extent.**
-TDengine suggests using collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
+TDengine suggests using data collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
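+
+As a minimal standalone sketch for the smart meter example above (the column types are illustrative only):
+
+```bash
+taos> CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
+```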
### STable: A Collection of Data Points in the Same Type
-The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the [STable(Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine.
+The design of one table for each data collection point will require a huge number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between data collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the [STable(Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine.
-STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
+STable is an abstract set for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a set of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
-When aggregating multiple data collection points with the same data type, TDEngine will first find out the tables that meet the tag filters from the STables, and then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of aggregation calculation.
+When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filter conditions from the STables, then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation.
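+
+A hedged sketch of this workflow, following the smart meter example (names, schema, and the aggregation query are illustrative only):
+
+```bash
+taos> CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+taos> CREATE TABLE d1001 USING meters TAGS ("beijing", 2);
+taos> SELECT AVG(voltage) FROM meters WHERE location = "beijing" INTERVAL(1h);
+```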
## Cluster and Primary Logic Unit
-The design of TDengine is based on the assumption that one single hardware or software system is unreliable and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed according to a distributed and high-reliability architecture since Day One of R&D, which supports scale-out, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment.
+The design of TDengine is based on the assumption that one single node or software system is unreliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment.
### Primary Logic Unit
Logical structure diagram of TDengine distributed architecture as following:

- Picture 1: TDengine architecture diagram
-
-
+ Figure 1: TDengine architecture diagram
-A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDEngine application driver (taosc) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through taosc's API. The following is a brief introduction to each logical unit.
+A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine application driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
-**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
+**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
-**Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node.
+**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
-**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is used to manage between mnodes, and the data synchronization is carried out in a strong consistent way. Any data update operation can only be done on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.
-**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be done without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
+**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, which assigns it a system-unique ID, aka the VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and their data is backed up to each other. The number of virtual nodes in a virtual node group can be changed dynamically and can be as low as one, in which case there is no data replication. The VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
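+
+For example, a database with three replicas might be created like this (`demo` is a placeholder name):
+
+```bash
+taos> CREATE DATABASE demo REPLICA 3;
+```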
-**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.
+**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for the interaction between applications and the cluster, and provides the native interface of the C/C++ language, which is embedded in the JDBC, C#, Python, Go, and Node.js connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata, and forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final-stage aggregation, sorting, filtering and other operations. For the JDBC, C/C++, C#, Python, Go and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of the TDengine cluster.
### Node Communication
-**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.
+**Communication mode**: The communication among the data nodes of the TDengine system, and between the application driver and each data node, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission over UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or for query operations. At the same time, TDengine will automatically compress/decompress the data and digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
-**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address is variable, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the normal operation of DNS service, or configure hosts files on nodes and the nodes where applications are located.
+**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with an FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because the IP address may change, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure that the DNS service is running, or that the hosts files on the nodes are configured properly.
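+
+A minimal taos.cfg sketch (the FQDN below is a placeholder and must resolve on every node):
+
+```bash
+# excerpt from /etc/taos/taos.cfg
+fqdn h1.taosdata.com
+serverPort 6030
+```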
-**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. When using, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
+**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
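+
+For example, with the default serverPort of 6030, one way to open the required range on a Linux host (assuming ufw is the firewall in use) is:
+
+```bash
+sudo ufw allow 6030:6040/tcp
+sudo ufw allow 6030:6040/udp
+```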
-**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option-h, and the configured port number can be specified through -p. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted.
+**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted.
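+For example, assuming a data node with FQDN h1.taosdata.com serving on the default port 6030, a connection could be initiated like this:
+```
+taos -h h1.taosdata.com -p 6030
+```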
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2: Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3: Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connected. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
+**Intra-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain the EP information of the mnode:
-**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
+1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the EP information of the mnode, skip to the second step;
+2. Check the system configuration file taos.cfg to obtain the node configuration parameters firstEp and secondEp (the nodes specified by these two parameters can be normal nodes without mnode; in this case, the node will try to redirect to the mnode when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step (a configuration sketch follows this list);
+3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will join the working cluster once connected. If not successful, it will try the next item in the mnode EP list. If all attempts fail, it sleeps for a few seconds before trying again.
+
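+As a sketch, the taos.cfg entries consulted in step 2 might look as follows (the EPs are illustrative):
+```
+# EPs of two data nodes in an existing cluster, tried at startup to locate the mnode
+firstEp h1.taosdata.com:6030
+secondEp h2.taosdata.com:6030
+```
+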
+**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code for it. The server side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP is in the list, the data node will start the mnode module and become an mnode. If its own EP is not in the mnode EP List, the mnode module will not start. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, totally transparently and without manual intervention. The modification of configuration parameters is a decision made by the mnode itself according to resource usage.
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step 1: connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: in the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EPs of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.
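+For step 1, a sketch in TDengine CLI (the End Point is illustrative) might look like:
+```mysql
+CREATE DNODE "h3.taosdata.com:6030";
+```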
-**Redirection**: No matter about dnode or taosc, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or taosc, if it’s not an mnode by self, it will reply the mnode EP List back. After receiving this list, taosc or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes.
+**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode shall be initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connection request from a newly started dnode or TAOSC, if it is not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
-### A Typical Messaging Process
+### A Typical Data Writing Process
-To explain the relationship between vnode, mnode, taosc and application and their respective roles, the following is an analysis of a typical data writing process.
+To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process (a sample insert statement is sketched after the steps).
-
- Picture 2 typical process of TDengine
+
+ Figure 2: Typical data writing process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
-2. Cache be checked by taosc that if meta data existing for the table. If so, go straight to Step 4. If not, taosc sends a get meta-data request to mnode.
-3. Mnode returns the meta-data of the table to taosc. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If taosc does not receive a response from the mnode for a long time, and there are multiple mnodes, taosc will send a request to the next mnode.
-4. Taosc initiates an insert request to master vnode.
-5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will judge the node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in vgroup.
-6. Taosc notifies APP that writing is successful.
+2. TAOSC checks the cache to see if meta data exists for the table. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
+3. Mnode returns the meta-data of the table to TAOSC. The meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table is stored; if the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
+4. TAOSC initiates an insert request to the master vnode.
+5. After the vnode inserts the data, it replies to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from the vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in the vgroup.
+6. TAOSC notifies APP that writing is successful.
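+As a sketch of step 1, assuming a table d1001 with schema (ts, current, voltage, phase) as modeled later in this documentation, the application might issue:
+```mysql
+INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
+```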
-For Step 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the externally serving End Point of the configured cluster. If the dnode that received the request does not have an mnode configured, it will inform the mnode EP list in a reply message, so that taosc will re-issue a request to obtain meta-data to the EP of another new mnode.
+For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue the request to obtain meta-data to the EP of another new mnode.
-For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so assumes that the first vnodeID is the master and send a request to it. If the requested vnode is not the master, it will reply the actual master as a new target taosc makes a request to. Once the reply of successful insertion is obtained, taosc will cache the information of master node.
+For Steps 4 and 5, without caching, TAOSC cannot identify the master in the virtual node group, so it assumes that the first vnode is the master and sends the request to it. If this vnode is not the master, it replies with the actual master as the new target to which TAOSC then resends the request. Once the reply of a successful insertion is received, TAOSC caches the information of the master node.
-The above is the process of inserting data, and the processes of querying and calculating are completely consistent. Taosc encapsulates and shields all these complicated processes, and has no perception and no special treatment for applications.
+The above is the process of inserting data; the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, so they are transparent to applications.
-Through taosc caching mechanism, mnode needs to be accessed only when a table is operated for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), taosc will interact with mnode regularly to automatically update the cache.
+Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
## Storage Model and Data Partitioning/Sharding
### Storage Model
-The data stored by TDengine include collected time-series data, metadata related to libraries and tables, tag data, etc. These data are specifically divided into three parts:
+The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts:
-- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when update parameter is set to 1. By adopting the model with one table for each collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple add operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single collection point with best performance.
-- Tag data: meta files stored in vnode support four standard operations of add, delete, modify and check. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. If there are many tag filtering operations, queries will be very frequent and TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the filtering results will return in milliseconds.
-- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of add, delete, modify and query are supported. The amount of these data are not large and can be stored in memory, moreover the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
+- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and the query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when the database parameter `update` is set to 1. By adopting the model with one table for each data collection point, the data of a given time period is continuously stored, and writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point.
+- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all of them can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in the face of millions of tables, the tag filtering results will return in milliseconds.
+- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of such data is not large and can be stored in memory; moreover, the query amount is not large because of the client cache. Therefore, although TDengine uses centralized storage management for metadata, there will be no performance bottleneck.
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
-- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicates, so wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite again, which is extremely expensive to operate.
-- Realize extremely efficient aggregation query between multiple tables: when doing aggregation query between multiple tables, it firstly finds out the tag filtered tables, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
+- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate.
+- Aggregate data efficiently across multiple tables: when aggregating data across multiple tables, it first finds out the tables which satisfy the tag filtering conditions, and then finds out the corresponding data blocks of these tables, which greatly reduces the data sets to be scanned and thus greatly improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries over tens of millions of tables can return in milliseconds.
### Data Sharding
-For large-scale data management, to achieve scale-out, it is generally necessary to adopt the a Partitioning strategy as Sharding. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
+For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
-VNode (Virtual Data Node) is responsible for providing writing, query and calculation functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and completely transparent to the application.
+VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then a table. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
-The meda data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is convenient for efficient and parallel tag filtering operations.
+The meta data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is good for efficient and parallel tag filtering operations.
### Data Partitioning
-In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter “days”. This method of partitioning by time rang is also convenient to efficiently implement the data retention strategy. As long as the data file exceeds the specified number of days (system configuration parameter ‘keep’), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the cold/hot management of big data and realize tiered-storage.
+In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the DB's configuration parameter `days`. This method of partitioning by time range also makes it easy to implement the data retention policy efficiently. As long as a data file exceeds the specified number of days (system configuration parameter `keep`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to implement tiered storage. Cold and hot data can be stored on different storage media to reduce storage costs.
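+A sketch of setting these two parameters when creating a database (the values are illustrative):
+```mysql
+CREATE DATABASE power DAYS 10 KEEP 365;
+```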
-In general, **TDengine splits big data by vnode and time as two dimensions**, which is convenient for parallel and efficient management with scale-out.
+In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
### Load Balancing
-Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) for declaring the status of the entire cluster. Based on the overall state, when an mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. In the process, external services keep running and the data insertion, query and calculation operations are not affected.
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
-If the mnode has not received the dnode status for a period of time, the dnode will be judged as offline. When offline lasts a certain period of time (the duration is determined by the configuration parameter ‘offlineThreshold’), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure t the replica number.
+If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter `offlineThreshold`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
-When new data nodes are added to the cluster, with new computing and storage are added, the system will automatically start the load balancing process.
+When new data nodes are added to the cluster, new computing and storage resources are added with them, and the system will automatically start the load balancing process.
-The load balancing process does not require any manual intervention without application restarted. It will automatically connect new nodes with completely transparence. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.**
+The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by the parameter `balance`, which turns automatic load balancing on or off.**
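+A minimal taos.cfg sketch, assuming 1 turns automatic load balancing on and 0 turns it off:
+```
+# enable automatic load balancing
+balance 1
+```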
## Data Writing and Replication Process
-If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies taosc to redirect.
+If a database has N replicas, a virtual node group has N virtual nodes, but only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect it.
### Master vnode Writing Process
Master Vnode uses a writing process as follows:
-Figure 3: TDengine Master writing process
+
+ Figure 3: TDengine Master writing process
-1. Master vnode receives the application data insertion request, verifies, and to next step;
-2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
+1. Master vnode receives the application data insertion request, verifies it, and moves to the next step;
+2. If the system configuration parameter `walLevel` is greater than 0, the vnode will write the original request packet into the database log file WAL. If `walLevel` is set to 2 and `fsync` is set to 0, TDengine will flush the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file (see the configuration sketch after these steps);
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful writing.
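+A sketch of the WAL-related taos.cfg entries referenced in step 2 (values are illustrative):
+```
+# 0: no WAL; 1: write WAL; 2: write WAL and fsync
+walLevel 2
+# fsync period in milliseconds; 0 means flush to disk immediately
+fsync 0
+```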
@@ -278,34 +280,34 @@ Figure 3: TDengine Master writing process
For a slave vnode, the write process is as follows:
-
- Picture 3 TDengine Slave Writing Process
+
+ Figure 4: TDengine Slave Writing Process
-1. Slave vnode receives a data insertion request forwarded by Master vnode.
-2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
-3. Write into memory and add the record to “skip list”;
+1. Slave vnode receives a data insertion request forwarded by Master vnode;
+2. If the system configuration parameter `walLevel` is greater than 0, the vnode will write the original request packet into the database log file WAL. If `walLevel` is set to 2 and `fsync` is set to 0, TDengine will flush the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
+3. Write into memory and add the record to “skip list”.
-Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory is exactly the same as WAL.
+Compared with the Master vnode, the slave vnode has no forwarding or reply confirmation step, which means two fewer steps. But writing into memory and WAL is exactly the same.
### Remote Disaster Recovery and IDC Migration
-As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with not obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
+As discussed in the Master and Slave processes above, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve writing performance, with no obvious impact from network delay. By configuring the IDC and rack number for each physical node, it can be ensured that, for a virtual node group, the virtual nodes are composed of physical nodes from different IDCs and different racks, thus implementing remote disaster recovery without other tools.
-On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can realize IDC room migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
+On the other hand, TDengine supports dynamic modification of the number of replicas. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is completed, the added virtual nodes can provide services. During the synchronization process, the master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add the new physical nodes to the existing IDC cluster, and then remove the old physical nodes after the data synchronization is completed.
-However, this asynchronous replication method has a tiny time window of written data lost. The specific scenario is as follows:
+However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows:
-1. Master vnode has completed its 5-step operations, confirmed the success of writing to APP, and then went down;
+1. Master vnode has finished its 5-step operations, confirmed the write success to the APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
-3. Slave vnode will become the new master, thus losing one record
+3. Slave vnode will become the new master, thus losing one record.
-In theory, as long as in asynchronous replication, there is no guarantee for no losing. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, with asynchronous replication, data loss cannot be completely prevented. However, this window is extremely small: it only occurs if the master and slave fail at the same time, right after the master has confirmed a successful write to the application.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
-Vnode maintains a Version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is collecting time-series data or metadata, this version number will be increased by one.
+Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows:
@@ -318,7 +320,7 @@ See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/do
### Synchronous Replication
-For scenarios with higher data consistency requirements, asynchronous data replication is not applicable, because there is some small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
+For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter `quorum`. If quorum is greater than one, every time the Master forwards a message to the replicas, it needs to wait for quorum-1 confirmation replies before informing the application that the data has been successfully written to the slaves. If the quorum-1 confirmation replies are not received within a certain period of time, the master vnode will return an error to the application.
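+A sketch of creating a database with synchronous replication (the replica and quorum values are illustrative):
+```mysql
+CREATE DATABASE power REPLICA 3 QUORUM 2;
+```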
With synchronous replication, system performance will decrease and latency will increase. Because metadata needs to be strongly consistent, data synchronization between mnodes defaults to synchronous replication.
@@ -336,24 +338,22 @@ Each vnode has its own independent memory, and it is composed of multiple memory
TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after written successfully to avoid unlimited log growth.
-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter “days”. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.
+To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each of which only saves data for a fixed number of days, determined by the system configuration parameter `days`. In this way, given the start and end dates of a query, the data files to open can be located immediately without any index, greatly speeding up the reading operation.
-For collected data, there is generally a retention period, which is determined by the system configuration parameter “keep”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
+For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `keep`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, reasonable days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set.
-In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter “maxRows” (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
+In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `maxRows` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating specific data during a search will take longer; if it is too small, the index of the data blocks will be too large, and the compression efficiency will be low, with slower reading speed.
-Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter “minRows” (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of the data blocks for each table, recording the offset of each data block in the data file, the start and end times of the data, and other information, so that the system can quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records of a table does not reach the system configuration parameter `minRows` (minimum number of records per block), they will be stored in the last file first. At the next disk write, the newly written records will be merged with the records in the last file and then written into the data file.
-When data is written to disk, it is decided whether to compress the data according to system configuration parameter “comp”. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
+Whether data is compressed when written to disk is determined by the system configuration parameter `comp`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression applies a general compression algorithm on top of one-stage compression, which yields a higher compression ratio.
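+A sketch combining the file-related parameters above (values are illustrative; with keep 365 and days 10, a vnode holds about 37 data files, within the recommended 10 to 100 range):
+```mysql
+CREATE DATABASE power DAYS 10 KEEP 365 MAXROWS 4096 MINROWS 100 COMP 2;
+```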
### Tiered Storage
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
-
-
dataDir format is as follows:
```
dataDir data_path [tier_level]
@@ -361,8 +361,6 @@ dataDir data_path [tier_level]
Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
-
-
Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
@@ -376,7 +374,6 @@ dataDir /mnt/disk6/taos 2
Mounted disks can also be a non-local network disk, as long as the system can access it.
-
Note: Tiered Storage is only supported in Enterprise Edition
## Data Query
@@ -393,17 +390,15 @@ When client obtains query result, the worker thread in query execution queue of
### Aggregation by Time Axis, Downsampling, Interpolation
-The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and unique function from common databases. From this point of view, it is similar to the window query of stream computing engine.
+The remarkable feature that distinguishes time-series data from ordinary data is that each record has a timestamp, so aggregating data along the time axis by timestamp is an important capability that sets it apart from common databases. From this point of view, it is similar to the window query of a stream computing engine.
-The keyword “interval” is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated according to time windows, and the data within window range are aggregated as needed. For example:
+The keyword `interval` is introduced into TDengine to split the time axis into fixed-length time windows, and the data within each window is aggregated as needed. For example:
```mysql
select count(*) from d1001 interval(1h);
```
-According to the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window.
-
-
+For the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window.
In application scenarios where query results need to be obtained continuously, if there is data missing in a given time interval, the data results in this interval will also be lost. TDengine provides a strategy to interpolate the results of timeline aggregation calculation. The results of time axis aggregation can be interpolated by using keyword Fill. For example:
@@ -411,24 +406,25 @@ In application scenarios where query results need to be obtained continuously, i
select count(*) from d1001 interval(1h) fill(prev);
```
-According to the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value).
+For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value).
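+As another sketch, empty windows can be populated with a specific value (0 here):
+```mysql
+select count(*) from d1001 interval(1h) fill(value, 0);
+```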
### Multi-table Aggregation Query
-TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is completely consistent, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:
+TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. A STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, and they can be added, deleted and modified at any time. Applications can aggregate or run statistics over all or a subset of the tables under a STable by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:

- Picture 4 Diagram of multi-table aggregation query
+ Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
-2. taosc sends the STable name to Meta Node(management node);
-3. Management node sends the vnode list owned by the STable back to taosc;
-4. taosc sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes;
-5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to taosc;
-6. taosc finally aggregates the results returned by multiple data nodes and send them back to application.
+2. TAOSC sends the STable name to the Meta Node (management node);
+3. The management node sends the vnode list owned by the STable back to TAOSC;
+4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes;
+5. Each vnode first finds out, in memory, the set of tables within its own node that meet the tag filters, then scans the stored time-series data, completes the corresponding aggregation calculations, and returns the result to TAOSC;
+6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation calculation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation calculation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
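+As a sketch, assuming the meters STable with a location tag as modeled later in this documentation, such a multi-table aggregation might look like:
+```mysql
+select avg(voltage) from meters where location = "Beijing.Chaoyang" interval(1h);
+```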
### Precomputation
In order to effectively improve the performance of query processing, based on the immutability of IoT data, statistical information of the data stored in a data block is recorded in the head of the data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and there is no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of the data blocks stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the reading IO pressure and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) index of PostgreSQL.
+
diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md
index 2e69054fa12340cb0e264848d7def4b7685f8796..e28dd906f3b1fae76cf6657c8f946468b92788f0 100644
--- a/documentation20/en/04.model/docs.md
+++ b/documentation20/en/04.model/docs.md
@@ -2,17 +2,15 @@
TDengine adopts a relational data model, so we need to build the "database" and "table". Therefore, for a specific application scenario, it is necessary to consider the design of the database, STable and ordinary table. This section does not discuss detailed syntax rules, but only concepts.
-Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling.
-
## Create a Database
-Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. For example:
+Different types of data collection points often have different data characteristics, including data sampling rate, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine works with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time resolution, max and min number of records in a file block, whether it is compressed or not, and the number of days covered by a data file. For example:
```mysql
-CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
+CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
```
-The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 4 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
+The above statement will create a database named “power”. The data of this database will be kept for 365 days (data will be automatically deleted 365 days later), one data file will be created per 10 days, the number of memory blocks is 6, and data updating is allowed. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
After the database created, please use SQL command USE to switch to the new database, for example:
@@ -20,7 +18,7 @@ After the database created, please use SQL command USE to switch to the new data
USE power;
```
-Replace the database operating in the current connection with “power”, otherwise, before operating on a specific table, you need to use "database name. table name" to specify the name of database to use.
+This specifies “power” as the database in use for the current connection; otherwise, before operating on a specific table, you would need to prefix the table name with the database name, as in "database-name.table-name".
**Note:**
@@ -37,11 +35,11 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
-Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, volume, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tag (location, groupId in the example), and the data types of the tag can be int, float, string and so on. Static attributes of collection points can often be used as tags, such as geographic location of collection points, device model, device group ID, administrator ID, etc. The schema of the tag can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
+Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), and the other columns are the metrics collected (current, voltage, phase in the example); their data types can be int, float, string, etc. In addition, you need to provide the schema of the tags (location, groupId in the example), and the data types of the tags can be int, float, string and so on. Static attributes of data collection points can often be used as tags, such as the geographic location of a collection point, device model, device group ID, administrator ID, etc. The schema of the tags can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
-Each type of data collection point needs an established STable, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, transformers, buses, switches, etc. For IoT, a device may have multiple data collection points (for example, a fan for wind-driven generator, some collection points capture parameters such as current and voltage, and some capture environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for corresponding types of devices. All collected physical metrics contained in one and the same STable must be collected at the same time (with a consistent timestamp).
+A STable must be created for each type of data collection point, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, a STable for transformers, a STable for buses, a STable for switches, and so on. In IoT, a device may have multiple data collection points (for example, on a wind turbine, one data collection point captures metrics such as current and voltage, while another captures environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for the corresponding types of devices. All metrics contained in a STable must be collected at the same time (with the same timestamp).
-A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
+A STable allows up to 1024 columns. If the number of metrics collected at a data collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
## Create a Table
@@ -53,22 +51,23 @@ CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
Here d1001 is the table name and meters is the name of the STable, followed by the tag value "Beijing.Chaoyang" for the tag location and the tag value 2 for the tag groupId. Although tag values need to be specified when creating the table, they can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details.
-**Note: ** At present, TDengine does not technically restrict the use of a STable of a database (dbA) as a template to create a sub-table of another database (dbB). This usage will be prohibited later, and it is not recommended to use this method to create a table.
+**Note:** At present, TDengine does not technically restrict using a STable of one database (dbA) as a template to create a sub-table in another database (dbB). This usage will be prohibited later, and it is not recommended to create tables this way.
TDengine suggests using the globally unique ID of a data collection point as the table name (such as the device serial number). However, in some scenarios there is no unique ID, in which case multiple IDs can be combined into a unique one, as sketched below. It is not recommended to use a unique ID as a tag value.
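+
+As an illustrative sketch, a device serial number and a channel ID (both hypothetical) combined into one table name:
+
+```mysql
+-- Hypothetical combined ID used as the table name:
+CREATE TABLE meter_sn1001_ch2 USING meters TAGS ("Beijing.Chaoyang", 2);
+```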
-**Automatic table creating** : In some special scenarios, user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using automatic table building syntax when writing data. If the table already exists, no new table will be created. For example:
+**Automatic table creation**: In some special scenarios, the user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using the automatic table creation syntax when writing data. If the table already exists, no new table will be created. For example:
```mysql
INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
```
-The SQL statement above inserts records (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to automatically create it, and the tag value "Beijing.Chaoyang", 2 is marked at the same time.
+The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to create it automatically, and the tag values "Beijing.Chaoyang", 2 are set at the same time.
For the detailed syntax of automatic table creation, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section.
## Multi-column Model vs Single-column Model
-TDengine supports multi-column model. As long as physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), these metrics can be placed in a STable as different columns. However, there is also an extreme design, a single-column model, in which each collected physical metric is set up separately, so each type of physical metrics is set up separately with a STable. For example, create 3 Stables, one each for current, voltage and phase.
+TDengine supports the multi-column model. As long as metrics are collected simultaneously by a data collection point (with the same timestamp), these metrics can be placed in one STable as different columns. However, there is also an extreme design, the single-column model, in which a STable is created for each metric. For the smart meter example, we would need to create 3 STables, one for current, one for voltage and one for phase, as sketched below.
+
+TDengine recommends using the multi-column model as much as possible because of its higher insertion and storage efficiency. However, in some scenarios the types of collected metrics change often. In this case, if the multi-column model is adopted, the schema definition of the STable needs to be modified frequently and the application becomes complicated. The single-column model is then recommended.
-TDengine recommends using multi-column model as much as possible because of higher insertion and storage efficiency. However, for some scenarios, types of collected metrics often change. In this case, if multi-column model is adopted, the structure definition of STable needs to be frequently modified so make the application complicated. To avoid that, single-column model is recommended.
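+
+A hedged sketch of the single-column model for the smart meter example (the STable names here are assumptions):
+
+```mysql
+-- One STable per metric, each with the same tag schema:
+CREATE STABLE current_stb (ts timestamp, current float) TAGS (location binary(64), groupId int);
+CREATE STABLE voltage_stb (ts timestamp, voltage int) TAGS (location binary(64), groupId int);
+CREATE STABLE phase_stb (ts timestamp, phase float) TAGS (location binary(64), groupId int);
+```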
diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
index 88746ea60867b37e5956075f88c48ebd8276dfaa..7e99cf09dbae6a09429c83810f07db6ef4dafbe7 100644
--- a/documentation20/en/05.insert/docs.md
+++ b/documentation20/en/05.insert/docs.md
@@ -1,22 +1,22 @@
# Efficient Data Writing
-TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in a single piece or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, nonsequential data insertion, and also historical data insertion.
+TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, and data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion.
-## SQL Writing
+## Data Writing via SQL
-Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
+Applications insert data by executing SQL insert statements through the C/C++, JDBC, Go, C#, or Python connectors, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
-TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
+TDengine supports writing multiple records in a single statement. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
-TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
+TDengine also supports writing data to multiple tables in a single statement. For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
@@ -26,22 +26,22 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd
**Tips:**
-- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M).
-- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write parallelly. However, after the number of threads reaches a certain threshold, it cannot be increased or even become decreased, because too much frequent thread switching brings extra overhead.
-- For a same table, if the timestamp of a newly inserted record already exists, (no database was created using UPDATE 1) the new record will be discarded as default, that is, the timestamp must be unique in a table. If an application automatically generates records, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted will be smaller than the number of records the application try to insert. If you use UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record.
+- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K in size, and the total length of an SQL statement cannot exceed 64K (configurable via the parameter maxSQLLength, up to a maximum of 1M).
+- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write in parallel. However, after the number of threads reaches a certain threshold, adding more brings no further benefit and may even degrade performance, because too much thread switching brings extra overhead.
+- For the same table, if the timestamp of a newly inserted record already exists, the new record will be discarded by default (database option update = 0), that is, the timestamp must be unique within a table. If an application automatically generates records, it is very likely that the generated timestamps will collide, so the number of records successfully inserted will be smaller than the number of records the application tries to insert. If you use the UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record (see the sketch after this list).
- The timestamp of written data must be greater than the current time minus the value of the configuration parameter keep. If keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp of written data also cannot be greater than the current time plus the configuration parameter days. If days is configured as 2, data more than 2 days later than the current time cannot be written.
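+
+A minimal sketch of the duplicate-timestamp behavior (the database and table names here are hypothetical):
+
+```mysql
+CREATE DATABASE demo_u UPDATE 1;
+USE demo_u;
+CREATE TABLE t1 (ts timestamp, v float);
+INSERT INTO t1 VALUES (1538548685000, 10.3);
+-- Same timestamp: with UPDATE 1 this overwrites the record above;
+-- with the default update = 0 it would be discarded instead.
+INSERT INTO t1 VALUES (1538548685000, 12.6);
+```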
-## Direct Writing of Prometheus
+## Data Writing via Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring, including Kubernetes (K8S) performance monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma): with only simple configuration in Prometheus and no code at all, it can directly write the data collected by Prometheus into TDengine and automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_prometheus From Source
-Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using Golang language compiler. Before you start compiling, you need to complete following prepares:
+Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable file using the Golang compiler. Before you start compiling, you need to prepare:
- A server running Linux OS
- Golang version 1.10 or higher installed
-- An appropriated TDengine version. Because the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side; for example, if the server version is TDengine 2.0. 0, ensure install the same version on the linux server where bailongma is located (can be on the same server as TDengine, or on a different server)
+- Since the client dynamic link library of TDengine is used, the same version of TDengine as the server side must be installed. For example, if the server version is TDengine 2.0.0, make sure to install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_prometheus, which holds the Prometheus writing API. The compilation process is as follows:
@@ -134,7 +134,7 @@ The format of generated data by Prometheus is as follows:
}
```
-Where apiserver_request_latencies_bucket is the name of the time-series data collected by prometheus, and the tag of the time-series data is in the following {}. blm_prometheus automatically creates a STable in TDengine with the name of the time series data, and converts the tag in {} into the tag value of TDengine, with Timestamp as the timestamp and value as the value of the time-series data. Therefore, in the client of TDEngine, you can check whether this data was successfully written through the following instruction.
+Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series are in the braces that follow. blm_prometheus automatically creates a STable in TDengine named after the time series, converts the tags in the braces into TDengine tag values, and uses Timestamp as the timestamp and value as the value of the time series. Therefore, in the TDengine client, you can check whether this data was successfully written with the following commands.
```mysql
use prometheus;
@@ -144,7 +144,7 @@ select * from apiserver_request_latencies_bucket;
-## Direct Writing of Telegraf
+## Data Writing via Telegraf
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for IT operation data collection. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma): with only simple configuration in Telegraf and no code at all, it can directly write the data collected by Telegraf into TDengine and automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
@@ -271,12 +271,12 @@ select * from cpu;
MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine.
-## Direct Writing of EMQ Broker
+## Data Writing via EMQ Broker
[EMQ](https://github.com/emqx/emqx) is open source MQTT Broker software. With no coding needed, simply configuring "rules" in the EMQ Dashboard lets MQTT data be written directly into TDengine. EMQ X supports storing data to TDengine by sending it to a web service, and its Enterprise Edition also provides a native TDengine driver for direct data storage. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
-## Direct Writing of HiveMQ Broker
+## Data Writing via HiveMQ Broker
-[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that offers Free Personal and Enterprise Edition versions. It is mainly used by enterprises and for emerging machine-to-machine (M2M) communication and internal transport where scalability, ease of management and security are required. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via the HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
\ No newline at end of file
diff --git a/documentation20/en/06.queries/docs.md b/documentation20/en/06.queries/docs.md
index c4f1359820a28b390e84be93e077fecb1d5ede0e..d906443153bb7e83cee69da4588554893ce154a3 100644
--- a/documentation20/en/06.queries/docs.md
+++ b/documentation20/en/06.queries/docs.md
@@ -2,7 +2,7 @@
## Main Query Features
-TDengine uses SQL as the query language. Applications can send SQL statements through C/C++, Java, Go, Python connectors, and users can manually execute SQL Ad-Hoc Query through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
+TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, C#, Python, or Node.js connectors, and users can manually execute ad-hoc SQL queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters for tags and numeric values: >, <, =, <>, like, etc
@@ -28,7 +28,7 @@ For specific query syntax, please see the [Data Query section of TAOS SQL](https
## Multi-table Aggregation Query
-In an IoT scenario, there are often multiple data collection points in a same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the statical attributes of data collection points. A given data collection point has a specific tag value. By specifying the filters of tags, TDengine provides an efficient method to aggregate and query the sub-tables of STables (data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same.
+In IoT scenarios, there are often multiple data collection points of the same type. TDengine uses the concept of a STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the static attributes of data collection points. A given data collection point has specific tag values. By specifying filters on tags, TDengine provides an efficient method to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same.
**Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location
@@ -55,7 +55,7 @@ TDengine only allows aggregation queries between tables belonging to a same STab
## Down Sampling Query, Interpolation
-In a scenario of IoT, it is often necessary to aggregate the collected data by intervals through down sampling. TDengine provides a simple keyword interval, which makes query operations according to time windows extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds.
+In IoT scenarios, it is often necessary to aggregate the collected data by time interval through down sampling. TDengine provides a simple keyword `interval`, which makes queries over time windows extremely simple. For example, the following sums the current values collected by smart meter d1001 every 10 seconds.
```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
@@ -94,6 +94,6 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
Query OK, 5 row(s) in set (0.001521s)
```
-In a scenario of IoT, it is difficult to synchronize the time stamp of collected data at each point, but many analysis algorithms (such as FFT) need to align the collected data strictly at equal intervals of time. In many systems, it’s required to write their own programs to process, but the down sampling operation of TDengine can be easily solved. If there is no collected data in an interval, TDengine also provides interpolation calculation function.
+In IoT scenarios, it is difficult to synchronize the timestamps of the data collected at each point, while many analysis algorithms (such as FFT) need the data aligned strictly at equal time intervals. In many systems, users have to write their own programs to handle this, but the down sampling operation of TDengine solves the problem easily. If there is no collected data in an interval, TDengine also provides an interpolation function, as sketched below.
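+
+A hedged sketch of interpolation combined with down sampling (assuming the smart meter table d1001 used above and the FILL clause described in the Time-dimension Aggregation section):
+
+```mysql
+-- Average current per 10s window; windows without data are linearly interpolated.
+SELECT AVG(current) FROM d1001 WHERE ts >= '2018-10-03 14:38:00' AND ts <= '2018-10-03 14:39:00' INTERVAL(10s) FILL(LINEAR);
+```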
-For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
\ No newline at end of file
+For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
diff --git a/documentation20/en/07.advanced-features/docs.md b/documentation20/en/07.advanced-features/docs.md
index d9103c70216772a9ce24f67c719b379b106a9055..38c70862b637daf5840606535971e412d938b9e8 100644
--- a/documentation20/en/07.advanced-features/docs.md
+++ b/documentation20/en/07.advanced-features/docs.md
@@ -9,8 +9,8 @@ Continuous query of TDengine adopts time-driven mode, which can be defined direc
The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways:
- Unlike stream computing, which feeds back calculated results in real time, a continuous query only starts calculating after the time window closes. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59.
-- If a history record is written to the time interval that has been calculated, the continuous query will not recalculate and will not push the results to the user again. For the mode of writing back to TDengine, the existing calculated results will not be updated.
-- Using the mode of continuous query pushing results, the server does not cache the client's calculation status, nor does it provide Exactly-Once semantic guarantee. If the user's application side crashed, the continuous query pulled up again would only recalculate the latest complete time window from the time pulled up again. If writeback mode is used, TDengine can ensure the validity and continuity of data writeback.
+- If a historical record is written into a time interval that has already been calculated, the continuous query will not re-calculate it, nor push new results to the user again.
+- The TDengine server does not cache or save the client's status, nor does it provide an Exactly-Once semantic guarantee. If the application crashes, the continuous query has to be launched again, and the starting time must be provided by the application.
### How to use continuous query
@@ -29,7 +29,7 @@ We already know that the average voltage of these meters can be counted with one
select avg(voltage) from meters interval(1m) sliding(30s);
```
-Every time this statement is executed, all data will be recalculated. If you need to execute every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as following, using a different `startTime` each time and executing it regularly:
+Every time this statement is executed, all data will be re-calculated. If you need to execute it every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as follows, using a different `startTime` each time and executing it regularly:
```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
@@ -65,7 +65,7 @@ It should be noted that now in the above example refers to the time when continu
### Manage the Continuous Query
-Users can view all continuous queries running in the system through the show streams command in the console, and can kill the corresponding continuous queries through the kill stream command. Subsequent versions will provide more finer-grained and convenient continuous query management commands.
+Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill a continuous query through the `kill stream` command. Subsequent versions will provide finer-grained and more convenient continuous query management commands.
## Publisher/Subscriber
@@ -101,7 +101,7 @@ Another method is to query the STable. In this way, no matter how many meters th
select * from meters where ts > {last_timestamp} and current > 10;
```
-However, how to choose `last_timestamp` has become a new problem. Because, on the one hand, the time of data generation (the data timestamp) and the time of data storage are generally not the same, and sometimes the deviation is still very large; On the other hand, the time when the data of different meters arrive at TDengine will also vary. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; If the timestamp of the fastest meter is used, the data of other meters may be missed.
+However, how to choose `last_timestamp` becomes a new problem. On the one hand, the time of data generation (the data timestamp) and the time of data writing are generally not the same, and sometimes deviate significantly; on the other hand, the times at which the data of different meters arrive at TDengine also vary. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; if the timestamp of the fastest meter is used, the data of other meters may be missed.
The subscription function of TDengine provides a thorough solution to the above problem.
@@ -357,4 +357,4 @@ This SQL statement will obtain the last recorded voltage value of all smart mete
In TDengine application scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out data that meets certain conditions from the data of the latest period of time, calculate a result from these data according to a defined formula, and, when the result meets certain conditions and lasts for a certain period of time, notify the user in some form.
-In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).
+In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).
\ No newline at end of file
diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..16adf906bea85d538ac408e1c40b18160aceed78
--- /dev/null
+++ b/documentation20/en/08.connector/01.java/docs.md
@@ -0,0 +1,525 @@
+# Java connector
+
+## Introduction
+
+The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (supported from taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the native methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful is implemented internally on top of the RESTful interface.
+
+
+
+The figure above shows the three ways Java applications can access TDengine:
+
+* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write or query requests to the taosd instance on physical node2 (pnode2).
+* RESTful: The Java application sends the SQL to the RESTful connector on physical node2 (pnode2), which then calls the client API (libtaos.so).
+* JDBC-RESTful: The Java application uses the JDBC-RESTful API to encapsulate SQL into a RESTful request and sends it to the RESTful connector of physical node 2.
+
+In terms of implementation, the JDBC driver of TDengine is kept as consistent as possible with the behavior of relational database drivers. However, due to differences between TDengine and relational databases in service objects and technical characteristics, there are some differences between taos-jdbcdriver and traditional relational database JDBC drivers. Pay attention to the following points:
+
+* Deleting a record is not supported in TDengine.
+* Transactions are not supported in TDengine.
+
+### Difference between JDBC-JNI and JDBC-RESTful
+
+
+| Difference                                                | JDBC-JNI       | JDBC-RESTful               |
+| --------------------------------------------------------- | -------------- | -------------------------- |
+| Supported OS                                              | Linux, Windows | All platforms              |
+| Whether the client needs to be installed                  | Required       | Not required               |
+| Whether the client needs upgrading after a server upgrade | Required       | Not required               |
+| Write performance                                         | -              | 50% to 90% of JDBC-JNI     |
+| Read performance                                          | -              | No different from JDBC-JNI |
+
+**Note**: RESTful interfaces are stateless. Therefore, when using JDBC-RESTful, you should specify the database name before all table names and STable names in SQL, for example:
+
+```sql
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+```
+
+## JDBC driver version and supported TDengine and JDK versions
+
+| taos-jdbcdriver | TDengine | JDK |
+| -------------------- | ----------------- | -------- |
+| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x |
+| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
+| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
+| 1.0.3 | 1.6.1.x and above | 1.8.x |
+| 1.0.2 | 1.6.1.x and above | 1.8.x |
+| 1.0.1 | 1.6.1.x and above | 1.8.x |
+
+## DataType in TDengine and Java connector
+
+The following table shows the mapping between TDengine data types and Java data types:
+
+| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
+| ----------------- | ------------------ | ------------------ |
+| TIMESTAMP | java.lang.Long | java.sql.Timestamp |
+| INT | java.lang.Integer | java.lang.Integer |
+| BIGINT | java.lang.Long | java.lang.Long |
+| FLOAT | java.lang.Float | java.lang.Float |
+| DOUBLE | java.lang.Double | java.lang.Double |
+| SMALLINT | java.lang.Short | java.lang.Short |
+| TINYINT | java.lang.Byte | java.lang.Byte |
+| BOOL | java.lang.Boolean | java.lang.Boolean |
+| BINARY | java.lang.String | byte array |
+| NCHAR | java.lang.String | java.lang.String |
+
+## Install Java connector
+
+### Runtime Requirements
+
+To run TDengine's Java connector, the following requirements shall be met:
+
+1. A Linux or Windows System
+
+2. Java Runtime Environment 1.8 or later
+
+3. TDengine client (required for JDBC-JNI, not required for JDBC-restful)
+
+**Note**:
+
+* After the TDengine client is successfully installed on Linux, the libtaos.so file is automatically copied to /usr/lib/libtaos.so, which is included in the Linux automatic scan path and does not need to be specified separately.
+* After the TDengine client is installed on Windows, the taos.dll file that the driver package depends on is automatically copied to the default search path C:/Windows/System32. You do not need to specify it separately.
+
+### Obtain JDBC driver by maven
+
+For Java developers, TDengine provides `taos-jdbcdriver` conforming to the JDBC (3.0) API. Users can find and download it through the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver). Add the following dependency in pom.xml of your maven project.
+
+```xml
+<dependency>
+  <groupId>com.taosdata.jdbc</groupId>
+  <artifactId>taos-jdbcdriver</artifactId>
+  <version>2.0.34</version>
+</dependency>
+```
+
+### Obtain JDBC driver by compiling source code
+
+You can download the TDengine source code and compile the latest version of the JDBC Connector.
+
+ ```shell
+ git clone https://github.com/taosdata/TDengine.git
+ cd TDengine/src/connector/jdbc
+ mvn clean package -Dmaven.test.skip=true
+ ```
+
+A taos-jdbcdriver-2.0.xx-dist.jar will be generated in the target directory.
+
+## Usage of Java connector
+
+### Establishing a Connection
+
+#### Establishing a connection with URL
+
+Establish the connection by specifying the URL, as shown below:
+
+```java
+String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+
+In the example above, the JDBC-RESTful driver is used to establish a connection to hostname 'taosdemo.com', port 6041, and database 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'.
+
+JDBC-RESTful does not depend on the native client library. Compared with JDBC-JNI, only the following are required (see the sketch after this list):
+
+* The driver class is "com.taosdata.jdbc.rs.RestfulDriver"
+* The JDBC URL starts with "jdbc:TAOS-RS://"
+* Port 6041 is used as the connection port
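+
+A minimal sketch under these assumptions (host and credentials as in the example above):
+
+```java
+// Load the RESTful driver explicitly (optional with JDBC 4+ auto-discovery),
+// then connect with a jdbc:TAOS-RS:// URL through port 6041.
+Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
+Connection conn = DriverManager.getConnection(
+        "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata");
+```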
+
+For better write and query performance, Java applications can use the JDBC-JNI driver, as shown below:
+
+```java
+String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+
+In the example above, the JDBC-JNI driver is used to establish a connection to hostname 'taosdemo.com', port 6030 (TDengine's default port), and database 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'.
+
+
+
+The format of JDBC URL is:
+
+```url
+jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]
+```
+
+The configuration parameters in the URL are as follows (an example follows the list):
+
+* user: user name for logging in to the TDengine. The default value is 'root'.
+* password: the user login password. The default value is 'taosdata'.
+* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
+* charset: character set used by the client. The default value is the system character set.
+* locale: client locale. The default value is the current system locale.
+* timezone: timezone used by the client. The default value is the current timezone of the system.
+* batchfetch: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is false.
+* timestampFormat: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
+* batchErrorIgnore: true if you want to continue executing the remaining SQL statements when an error occurs while executing the executeBatch method of Statement; false if the remaining SQL statements are not executed after an error. Default value is false.
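+
+An illustrative sketch (the parameter values here are examples only):
+
+```java
+// Append optional parameters such as charset, locale and timezone to the URL.
+String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"
+        + "&charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```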
+
+#### Establishing a connection with URL and Properties
+
+In addition to establishing the connection with a URL alone, you can also use Properties to specify the parameters when setting up the connection, as shown below:
+
+```java
+public Connection getConn() throws Exception{
+ String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
+ // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+}
+```
+
+In the example above, JDBC-JNI is used to establish a connection to hostname 'taosdemo.com', port 6030, and database 'test'. The commented-out line shows the URL for JDBC-RESTful. The connection specifies the user name as 'root' and the password as 'taosdata' in the URL, and specifies the character set, locale, time zone, and so on in connProps.
+
+The configuration parameters in properties are as follows:
+
+* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to the TDengine. The default value is 'root'.
+* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. The default value is 'taosdata'.
+* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
+* TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client. The default value is the system character set.
+* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale.
+* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system.
+* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is false.
+* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
+* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the remaining SQL statements when an error occurs while executing the executeBatch method of Statement; false if the remaining SQL statements are not executed after an error. Default value is false.
+
+#### Establishing a connection with configuration file
+
+When JDBC-JNI is used to connect to a TDengine cluster, you can specify the firstEp and secondEp parameters of the cluster in the client configuration file, as follows:
+
+1. Do not specify the hostname and port in the Java application
+
+```java
+public Connection getConn() throws Exception{
+ String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
+ Properties connProps = new Properties();
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+ connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+ Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+ return conn;
+}
+```
+
+2. Specify firstEp and secondEp in the configuration file
+
+```txt
+# first fully qualified domain name (FQDN) for TDengine system
+firstEp cluster_node1:6030
+# second fully qualified domain name (FQDN) for TDengine system, for cluster only
+secondEp cluster_node2:6030
+```
+
+In the above example, the JDBC driver uses the client configuration file to establish a connection to hostname 'cluster_node1', port 6030, and database 'test'. When the firstEp node in the cluster fails, JDBC will try to connect to the cluster using secondEp. In TDengine, as long as one of firstEp and secondEp is valid, the connection to the cluster can be established.
+
+**Note**: In this case, the configuration file belongs to the TDengine client running inside the Java application. The default file path is '/etc/taos/taos.cfg' on Linux and 'C:/TDengine/cfg/taos.cfg' on Windows.
+
+#### Priority of the parameters
+
+If a parameter is set repeatedly in the URL, Properties, and client configuration file, the priorities of the parameters in descending order are as follows:
+
+1. URL parameters
+2. Properties
+3. Client configuration file in taos.cfg
+
+For example, if you specify the password as 'taosdata' in the URL and as 'taosdemo' in the Properties, JDBC will establish the connection using the password in the URL, as sketched below.
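+
+A minimal sketch of this precedence, mirroring the example above:
+
+```java
+String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
+Properties connProps = new Properties();
+// Ignored: the password in the URL takes precedence over the Properties value.
+connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo");
+Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
+```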
+
+For details, see [Client Configuration](https://www.taosdata.com/en/documentation/administrator#client).
+
+### Create database and table
+
+```java
+Statement stmt = conn.createStatement();
+// create database
+stmt.executeUpdate("create database if not exists db");
+// use database
+stmt.executeUpdate("use db");
+// create table
+stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
+```
+
+### Insert
+
+```java
+// insert data
+int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
+System.out.println("insert " + affectedRows + " rows.");
+```
+
+**Note**: 'now' is an internal system function; it defaults to the current time of the computer where the client resides. 'now + 1s' means the client's current time plus 1 second. The supported time units are: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), and y (year).
+
+### Query
+
+```java
+// query data
+ResultSet resultSet = stmt.executeQuery("select * from tb");
+Timestamp ts = null;
+int temperature = 0;
+float humidity = 0;
+while(resultSet.next()){
+ ts = resultSet.getTimestamp(1);
+ temperature = resultSet.getInt(2);
+ humidity = resultSet.getFloat("humidity");
+ System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
+}
+```
+
+**Note**: The query is consistent with the operation of the relational database, and the index in ResultSet starts from 1.
+
+### Handle exceptions
+
+```java
+try (Statement statement = connection.createStatement()) {
+ // executeQuery
+ ResultSet resultSet = statement.executeQuery(sql);
+ // print result
+ printResult(resultSet);
+} catch (SQLException e) {
+ System.out.println("ERROR Message: " + e.getMessage());
+ System.out.println("ERROR Code: " + e.getErrorCode());
+ e.printStackTrace();
+}
+```
+
+The Java connector may report three types of error codes: JDBC driver errors (error codes ranging from 0x2301 to 0x2350), JNI method errors (error codes ranging from 0x2351 to 0x2400), and TDengine errors. For details about the error codes, see:
+
+- https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+- https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
+
+### Write data through parameter binding
+
+Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved its support for parameter binding in data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance. (**Note**: parameter binding is not supported in JDBC-RESTful.)
+
+```java
+Statement stmt = conn.createStatement();
+Random r = new Random();
+
+// In the INSERT statement, the VALUES clause allows you to specify specific columns; if automatic table creation is used, the TAGS clause must set the values of all tag columns
+TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
+
+s.setTableName("w1");
+
+// set tags
+s.setTagInt(0, r.nextInt(10));
+s.setTagString(1, "Beijing");
+int numOfRows = 10;
+
+// set values
+ArrayList<Long> ts = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ ts.add(System.currentTimeMillis() + i);
+}
+s.setTimestamp(0, ts);
+ArrayList<Integer> s1 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s1.add(r.nextInt(100));
+}
+s.setInt(1, s1);
+ArrayList<String> s2 = new ArrayList<>();
+for (int i = 0; i < numOfRows; i++){
+ s2.add("test" + r.nextInt(100));
+}
+s.setString(2, s2, 10);
+
+// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch
+s.columnDataAddBatch();
+s.columnDataExecuteBatch();
+// Clear the cache, after which you can bind new data(including table names, tags, values):
+s.columnDataClearBatch();
+s.columnDataCloseBatch();
+```
+
+The methods used to set tags are:
+
+```java
+public void setTagNull(int index, int type)
+public void setTagBoolean(int index, boolean value)
+public void setTagInt(int index, int value)
+public void setTagByte(int index, byte value)
+public void setTagShort(int index, short value)
+public void setTagLong(int index, long value)
+public void setTagTimestamp(int index, long value)
+public void setTagFloat(int index, float value)
+public void setTagDouble(int index, double value)
+public void setTagString(int index, String value)
+public void setTagNString(int index, String value)
+```
+
+The methods used to set columns are:
+
+```java
+public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
+public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
+public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
+public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
+public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
+public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
+public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
+public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
+```
+
+**Note**: Both setString and setNString require the user to pass, in the size parameter, the column width declared for the corresponding column in the table definition.
+
+### Data Subscription
+
+#### Subscribe
+
+```java
+TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+```
+
+Parameters:
+
+* topic: the unique topic name of the subscription.
+* sql: a select statement.
+* restart: true to restart the subscription if it already exists; false to continue the previous subscription.
+
+In the example above, a subscription named 'topic' is created which uses the SQL statement 'select * from meters'. If the subscription already exists, it will continue with the previous query progress rather than consuming all the data from scratch.
+
+#### Consume
+
+```java
+int total = 0;
+while(true) {
+ TSDBResultSet rs = sub.consume();
+ int count = 0;
+ while(rs.next()) {
+ count++;
+ }
+ total += count;
+ System.out.printf("%d rows consumed, total %d\n", count, total);
+ Thread.sleep(1000);
+}
+```
+
+The consume method returns a result set containing all the new data accumulated since the last consume. Make sure to call consume only as often as you actually need (note the Thread.sleep(1000) in the example); otherwise you will put unnecessary stress on the server.
+
+#### Close
+
+```java
+sub.close(true);
+// release resources
+resultSet.close();
+stmt.close();
+conn.close();
+```
+
+The close method closes a subscription. If the parameter is true, the subscription progress information is reserved, and a subscription with the same name can be created later to continue consuming data. If false, the subscription progress is not retained.
+
+**Note**: the connection must be closed; otherwise, a connection leak may occur.
+
+## Connection Pool
+
+### HikariCP example
+
+```java
+public static void main(String[] args) throws SQLException {
+ HikariConfig config = new HikariConfig();
+ // jdbc properties
+ config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+ config.setUsername("root");
+ config.setPassword("taosdata");
+ // connection pool configurations
+ config.setMinimumIdle(10); // minimum number of idle connections
+ config.setMaximumPoolSize(10); // maximum number of connections in the pool
+ config.setConnectionTimeout(30000); // maximum wait in milliseconds to get a connection from the pool
+ config.setMaxLifetime(0); // maximum lifetime of each connection
+ config.setIdleTimeout(0); // maximum idle time before an idle connection is recycled
+ config.setConnectionTestQuery("select server_status()"); // validation query
+ HikariDataSource ds = new HikariDataSource(config); //create datasource
+ Connection connection = ds.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+ //query or insert
+ // ...
+ connection.close(); // return the connection to the pool
+}
+```
+
+### Druid example
+
+```java
+public static void main(String[] args) throws Exception {
+ DruidDataSource dataSource = new DruidDataSource();
+ // jdbc properties
+ dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+ dataSource.setUrl(url);
+ dataSource.setUsername("root");
+ dataSource.setPassword("taosdata");
+ // pool configurations
+ dataSource.setInitialSize(10);
+ dataSource.setMinIdle(10);
+ dataSource.setMaxActive(10);
+ dataSource.setMaxWait(30000);
+ dataSource.setValidationQuery("select server_status()");
+ Connection connection = dataSource.getConnection(); // get connection
+ Statement statement = connection.createStatement(); // get statement
+ //query or insert
+ // ...
+ connection.close(); // return the connection to the pool
+}
+```
+
+**Note**:
+
+As of TDengine V1.6.4.1, the function select server_status() is supported specifically for heartbeat detection, so it is recommended to use select server_status() as the validation query when using connection pools.
+
+`select server_status()` returns 1 on success, as shown below.
+
+```sql
+taos> select server_status();
+server_status()|
+================
+1 |
+Query OK, 1 row(s) in set (0.000141s)
+```
+
+## Integration with frameworks
+
+- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate.
+- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring Boot.
+
+## Example Codes
+
+You can find sample code here: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
+
+## FAQ
+
+- java.lang.UnsatisfiedLinkError: no taos in java.library.path
+
+ **Cause**: The application cannot find the native library *taos*.
+
+ **Answer**: On Windows, copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\`; on Linux, make a soft link with `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
+
+- java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
+
+ **Cause**: Currently TDengine only supports 64-bit JDK.
+
+ **Answer**: Re-install a 64-bit JDK.
+
+- For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues)
+
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index 9cbd3952068d8eac23ffa9bcd7497ff158a21d86..fd9d129e50fa4450aed2fbebe80eddb978ef1263 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -66,7 +66,11 @@ Run install_client.sh to install.
Edit the taos.cfg file (default path /etc/taos/taos.cfg) and change firstEp to the End Point of the TDengine server, for example: h1.taos.com:6030.
-**Tip: If no TDengine service deployed in this machine, but only the application driver is installed, only firstEP needs to be configured in taos.cfg, and FQDN does not.**
+**Tips:**
+
+**1. If no TDengine service is deployed on this machine and only the application driver is installed, only firstEp needs to be configured in taos.cfg; FQDN does not.**
+
+**2. To prevent an “unable to resolve FQDN” error when connecting to the server, ensure that the client's hosts file contains the correct FQDN of the server.**
**Windows x64/x86**
@@ -128,7 +132,7 @@ taos>
**Windows (x64/x86) environment:**
-Under cmd, enter the c:\ tdengine directory and directly execute taos.exe, and you should be able to connect to tdengine service normally and jump to taos shell interface. For example:
+Under cmd, enter the C:\TDengine directory, execute taos.exe directly, and you should be able to connect to the TDengine service normally and enter the taos shell interface. For example:
```mysql
C:\TDengine>taos
@@ -296,9 +300,7 @@ Asynchronous APIs have relatively high requirements for users, who can selective
The asynchronous APIs of TDengine all use a non-blocking calling mode. Applications can use multiple threads to open multiple tables at the same time, and can query or insert into each open table at the same time. It should be pointed out that the **application client must ensure that operations on the same table are completely serialized**; that is, while an insertion or query on a table has not completed (no result returned yet), a second insertion or query on that same table must not be performed.
-
-
### Parameter binding API
In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows:
@@ -411,11 +413,11 @@ See [video tutorials](https://www.taosdata.com/blog/2020/11/11/1963.html) for th
Users can find the connector package for python2 and python3 in the source code src/connector/python (or tar.gz/connector/python) folder. Users can install it through `pip` command:
-`pip install src/connector/python/linux/python2/`
+`pip install src/connector/python/`
or
- `pip3 install src/connector/python/linux/python3/`
+ `pip3 install src/connector/python/`
#### Windows
@@ -823,12 +825,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html
TDengine provides the Go driver taosSql, which implements Go's built-in database/sql/driver interface. Users can access TDengine in an application by simply importing the package as follows; see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details.
-Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
+Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go .
```Go
import (
"database/sql"
- _ "github.com/taosdata/driver-go/taosSql"
+ _ "github.com/taosdata/driver-go/v2/taosSql"
)
```
@@ -839,6 +841,8 @@ go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
```
+`taosSql` v2 is a refactoring of v1: the built-in database operation interface `database/sql/driver` stays under the directory `taosSql`, while other advanced functions such as subscription and stmt are moved into the directory `af`.
+
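+As a minimal usage sketch, a connection can be opened through the standard `database/sql` interface (the user name, password and address in the DSN below are placeholder assumptions, not fixed values):
+
+```Go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	_ "github.com/taosdata/driver-go/v2/taosSql"
+)
+
+func main() {
+	// The DSN follows the driver's user:password@/protocol(host:port)/dbname
+	// convention; root/taosdata and localhost:6030 are placeholders.
+	db, err := sql.Open("taosSql", "root:taosdata@/tcp(localhost:6030)/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// sql.Open is lazy, so Ping forces an actual connection to the server.
+	if err := db.Ping(); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+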
### Common APIs
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
@@ -937,7 +941,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh
Steps:
-1. Create a new installation verification directory, for example: ~/tdengine-test, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).
+1. Create a new installation verification directory, for example: `~/tdengine-test`, and copy the nodejsChecker.js source program from GitHub. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).
2. Execute the following command:
diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
index e759da31677a5344a0f6578c751c4b77f86a43db..19544af0fa50af258f975532ad8399fcb8588b42 100644
--- a/documentation20/en/09.connections/docs.md
+++ b/documentation20/en/09.connections/docs.md
@@ -2,11 +2,11 @@
## Grafana
-TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require any code to write. The contents of the data table in TDengine can be visually showed on DashBoard.
+TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process requires no coding, and the contents of data tables in TDengine can be visually shown on a dashboard.
### Install Grafana
-TDengine currently supports Grafana 5.2.4 and above. You can download and install the package from Grafana website according to the current operating system. The download address is as follows:
+TDengine currently supports Grafana 6.2 and above. You can download and install the package from the Grafana website according to your operating system. The download address is as follows:
https://grafana.com/grafana/download.
@@ -64,7 +64,7 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard
-A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory/usr/local/taos/connector/grafana/tdengine/dashboard/.
+A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md
index d7d908ff424270d9aa33f89eefd36e73f6ab68b2..864bc46200767468561ff940f3ac271d558c833c 100644
--- a/documentation20/en/10.cluster/docs.md
+++ b/documentation20/en/10.cluster/docs.md
@@ -1,8 +1,8 @@
# TDengine Cluster Management
-Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node function.
+Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node TDengine.
-Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of this node.
+Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter `fqdn` in taos.cfg. If you want to access via direct IP address, you can set the parameter `fqdn` to the IP address of this node.
The cluster management of TDengine is extremely simple. Except for manual intervention in adding and deleting nodes, all other tasks are completed automatically, thus minimizing the workload of operation. This chapter describes the operations of cluster management in detail.
@@ -12,11 +12,11 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19
**Step 0:** Plan FQDN of all physical nodes in the cluster, and add the planned FQDN to /etc/hostname of each physical node respectively; modify the /etc/hosts of each physical node, and add the corresponding IP and FQDN of all cluster physical nodes. [If DNS is deployed, contact your network administrator to configure it on DNS]
-**Step 1:** If the physical nodes have previous test data, installed with version 1. x, or installed with other versions of TDengine, please delete it first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)"
+**Step 1:** If the physical nodes have previous test data, were installed with version 1.x, or were installed with another version of TDengine, please back up all data first, then delete the installation and all its data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)"
**Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`)on the premise of ensuring that the data is useless or backed up;
-**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or Host file.
+**Note 2:** The client also needs to be configured to ensure that it can correctly resolve the FQDN of each node, whether through a DNS service or the hosts file.
**Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that the TCP and UDP ports of ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built;
@@ -136,7 +136,7 @@ Execute the CLI program taos, log in to the TDengine system using the root accou
DROP DNODE "fqdn:port";
```
-Where fqdn is the FQDN of the deleted node, and port is the port number of its external server.
+Where fqdn is the FQDN of the node to be deleted, and port is its server port number.
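+
+For example, to remove a node whose End Point is h1.taosdata.com:6030 (a hypothetical value):
+
+```
+DROP DNODE "h1.taosdata.com:6030";
+```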
**【Note】**
@@ -185,7 +185,7 @@ Because of the introduction of vnode, it is impossible to simply draw a conclusi
TDengine cluster is managed by mnode (a module of taosd, management node). In order to ensure the high-availability of mnode, multiple mnode replicas can be configured. The number of replicas is determined by system configuration parameter numOfMnodes, and the effective range is 1-3. In order to ensure the strong consistency of metadata, mnode replicas are duplicated synchronously.
-A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically specified by the system according to the resource situation on the whole. User can execute the following command in the console of TDengine through the CLI program taos:
+A cluster has multiple data nodes (dnodes), but a dnode runs at most one mnode instance. With multiple dnodes, which dnode will serve as an mnode? This is automatically selected by the system based on overall resource usage. The user can execute the following command in the TDengine console through the CLI program taos:
```
SHOW MNODES;
@@ -213,7 +213,7 @@ When the above three situations occur, the system will start a load computing of
If a data node is offline, the TDengine cluster will automatically detect it. There are two detailed situations:
-- If the data node is offline for more than a certain period of time (configuration parameter offlineThreshold in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node is online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again.
+- If the data node is offline for more than a certain period of time (configuration parameter `offlineThreshold` in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node is online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again.
- After offline, the system will automatically start the data recovery process if it goes online again within the duration of offlineThreshold. After the data is fully recovered, the node will start to work normally.
**Note:** If each data node belonging to a virtual node group (including mnode group) is in offline or unsynced state, Master can only be elected after all data nodes in the virtual node group are online and can exchange status information, and the virtual node group can serve externally. For example, the whole cluster has 3 data nodes with 3 replicas. If all 3 data nodes go down and then 2 data nodes restart, it will not work. Only when all 3 data nodes restart successfully can serve externally again.
@@ -229,7 +229,7 @@ The name of the executable for Arbitrator is tarbitrator. The executable has alm
1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install.
-2. The command line parameter -p of this application can specify the port number of its external service, and the default is 6042.
+2. The command line parameter -p of this application can specify the port number of its service, and the default is 6042.
3. Modify the configuration file of each taosd instance, and set parameter arbitrator to the End Point corresponding to the tarbitrator in taos.cfg. (If this parameter is configured, when the number of replicas is even, the system will automatically connect the configured Arbitrator. If the number of replicas is odd, even if the Arbitrator is configured, the system will not establish a connection.)
4. The Arbitrator configured in the configuration file will appear in the return result of instruction `SHOW DNODES`; the value of the corresponding role column will be "arb".
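+
+For example, to apply step 3 above, each taosd instance would carry a line like the following in taos.cfg (the End Point is hypothetical):
+
+```
+arbitrator  h1.taosdata.com:6042
+```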
diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md
index 3817a41766d515d663661fd4382c883e0d8f179b..a2c2486b8e96cab95fad0f90470726d508dd63f7 100644
--- a/documentation20/en/11.administrator/docs.md
+++ b/documentation20/en/11.administrator/docs.md
@@ -22,8 +22,8 @@ If there is plenty of memory, the configuration of Blocks can be increased so th
CPU requirements depend on the following two aspects:
-- **Data insertion** TDengine single core can handle at least 10,000 insertion requests per second. Each insertion request can take multiple records, and inserting one record at a time is almost the same as inserting 10 records in computing resources consuming. Therefore, the larger the number of inserts, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches.
-- **Query requirements** TDengine to provide efficient queries, but the queries in each scenario vary greatly and the query frequency too, making it difficult to give objective figures. Users need to write some query statements for their own scenes to determine.
+- **Data insertion**: A single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can carry multiple records, and inserting one record at a time consumes almost the same computing resources as inserting 10 records. Therefore, the more records per insert, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches.
+- **Query**: TDengine provides efficient queries, but the queries in each scenario vary greatly, as does the query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to estimate.
Therefore, only for data insertion, CPU can be estimated, but the computing resources consumed by query cannot be that clear. In the actual operation, it is not recommended to make CPU utilization rate over 50%. After that, new nodes need to be added to bring more computing resources.
@@ -78,7 +78,7 @@ When the nodes in TDengine cluster are deployed on different physical machines a
## Server-side Configuration
-The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter -c from the taosd command line. Such as taosd-c/home/user, to specify that the configuration file is located in the /home/user directory.
+The background service of the TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, but a different directory can be specified with the `-c` parameter on the taosd command line, such as `taosd -c /home/user`, which means the configuration file is located in the /home/user directory.
You can also use “-C” to show the current server configuration parameters:
@@ -88,14 +88,14 @@ taosd -C
Only some important configuration parameters are listed below. For more parameters, please refer to the instructions in the configuration file. Please refer to the previous chapters for detailed introduction and function of each parameter, and the default of these parameters is working and generally does not need to be set. **Note: After the configuration is modified, \*taosd service\* needs to be restarted to take effect.**
-- firstEp: end point of the first dnode in the actively connected cluster when taosd starts, the default value is localhost: 6030.
-- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node.
+- firstEp: the end point of the first dnode in the cluster to connect to when taosd starts; the default value is localhost:6030.
+- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node.
- serverPort: the port number of the external service after taosd started, the default value is 6030.
- httpPort: the port number used by the RESTful service to which all HTTP requests (TCP) require a query/write request. The default value is 6041.
- dataDir: the data file directory to which all data files will be written. [Default:/var/lib/taos](http://default/var/lib/taos).
- logDir: the log file directory to which the running log files of the client and server will be written. [Default:/var/log/taos](http://default/var/log/taos).
-- arbitrator: the end point of the arbiter in the system; the default value is null.
-- role: optional role for dnode. 0-any; it can be used as an mnode and to allocate vnodes; 1-mgmt; It can only be an mnode, but not to allocate vnodes; 2-dnode; caannot be an mnode, only vnode can be allocated
+- arbitrator: the end point of the arbitrator in the system; the default value is null.
+- role: optional role for the dnode. 0-any: it can serve as an mnode and allocate vnodes; 1-mgmt: it can only be an mnode and cannot allocate vnodes; 2-dnode: it cannot be an mnode, and only vnodes can be allocated
- debugFlage: run the log switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default value: 131 or 135 (different modules have different default values).
- numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines.
- logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days.
@@ -161,18 +161,18 @@ For example:
## Client Configuration
-The foreground interactive client application of TDengine system is taos and application driver, which shares the same configuration file taos.cfg with taosd. When running taos, use the parameter -c to specify the configuration file directory, such as taos-c/home/cfg, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information taos --help. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg.
+The foreground interactive client application of the TDengine system is taos, together with the application driver; they share the same configuration file taos.cfg with taosd. When running taos, use the parameter `-c` to specify the configuration file directory, such as `taos -c /home/cfg`, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information `taos --help`. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg.
**Versions after 2.0. 10.0 support the following parameters on command line to display the current client configuration parameters**
```bash
-taos -C 或 taos --dump-config
+taos -C or taos --dump-config
```
Client configuration parameters:
- firstEp: end point of the first taosd instance in the actively connected cluster when taos is started, the default value is localhost: 6030.
-- secondEp: when taos starts, if not impossible to connect to firstEp, it will try to connect to secondEp.
+- secondEp: when taos starts, if unable to connect to firstEp, it will try to connect to secondEp.
- locale
Default value: obtained dynamically from the system. If the automatic acquisition fails, user needs to set it in the configuration file or through API
@@ -493,4 +493,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot
| CONCAT | GLOB | METRICS | SET | VIEW |
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
-| CONNECTION | | | | |
+| CONNECTION | | | | |
\ No newline at end of file
diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md
index dfa1742c999adbf4a3e7846955dc8a564339d0c2..7aaeb6c32b25cef8f0d1bf2f67ef94c3a2a007ee 100644
--- a/documentation20/en/12.taos-sql/docs.md
+++ b/documentation20/en/12.taos-sql/docs.md
@@ -1,8 +1,8 @@
# TAOS SQL
-TDengine provides a SQL-style language, TAOS SQL, to insert or query data, and support other common tips. To finish this document, you should have some understanding about SQL.
+TDengine provides a SQL-style language, TAOS SQL, to insert or query data. This document introduces TAOS SQL along with some common tips. To read through this document, readers should have a basic understanding of SQL.
-TAOS SQL is the main tool for users to write and query data to TDengine. TAOS SQL provides a style and mode similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion function for temporal structured data, the relevant function of data deletion is non-existent in TAO SQL.
+TAOS SQL is the main tool for users to write and query data into/from TDengine. It provides a syntax style similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion functionality for time-series data, the relevant functions of data deletion are unsupported in TAOS SQL.
Let’s take a look at the conventions used for syntax descriptions.
@@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin
- Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000.
- Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units.
-Default time precision of TDengine is millisecond, you can change it to microseocnd by setting parameter enableMicrosecond.
+TDengine's timestamp is set to millisecond precision by default. Microsecond/nanosecond precision can be set using CREATE DATABASE with the PRECISION parameter. (Nanosecond resolution is supported from version 2.1.5.0 onwards.)
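+
+For example, to create a database with microsecond precision (the database name is illustrative):
+
+```mysql
+CREATE DATABASE demo PRECISION 'us';
+```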
In TDengine, the following 10 data types can be used in data model of an ordinary table.
@@ -127,7 +127,7 @@ Note:
ALTER DATABASE db_name CACHELAST 0;
```
CACHELAST parameter controls whether last_row of the data subtable is cached in memory. The default value is 0, and the value range is [0, 1]. Where 0 means not enabled and 1 means enabled. (supported from version 2.0. 11)
-
+
**Tips**: After all the above parameters are modified, show databases can be used to confirm whether the modification is successful.
- **Show all databases in system**
@@ -138,14 +138,17 @@ Note:
## Table Management
-- Create a table
-Note:
+- **Create a table**
-1. The first field must be a timestamp, and system will set it as the primary key;
-2. The max length of table name is 192;
-3. The length of each row of the table cannot exceed 16k characters;
-4. Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers
-5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes;
+ ```mysql
+ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]);
+ ```
+ Note:
+ 1. The first field must be a timestamp, and system will set it as the primary key;
+ 2. The max length of table name is 192;
+ 3. The length of each row of the table cannot exceed 16k characters;
+ 4. Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers
+ 5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes;
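+
+  For example, a minimal sketch (the table and column names are illustrative):
+
+  ```mysql
+  CREATE TABLE IF NOT EXISTS d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
+  ```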
- **Create a table via STable**
@@ -171,10 +174,10 @@ Note:
Note:
1. The method of batch creating tables requires that the data table must use STable as a template.
- 2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement should be controlled between 1000 and 3000, which will obtain an ideal speed of table building.
+  2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement be kept between 1000 and 3000, which will achieve an ideal table creation speed.
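+
+  For example, a sketch of batch creation from an STable template (the STable name and tag values are illustrative):
+
+  ```mysql
+  CREATE TABLE IF NOT EXISTS d1001 USING meters TAGS ("Beijing.Chaoyang", 2)
+    d1002 USING meters TAGS ("Beijing.Haidian", 3);
+  ```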
- **Drop a table**
-
+
```mysql
DROP TABLE [IF EXISTS] tb_name;
```
@@ -218,7 +221,7 @@ Note:
## STable Management
-Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That is, in the instruction description later in this section, the three instructions of CREATE, DROP and ALTER need to write TABLE instead of STABLE in the old version as the reserved word.
+Note: In version 2.0.15.0 and later, the STABLE reserved word is supported. That is, in the command descriptions later in this section, CREATE, DROP and ALTER use the reserved word STABLE; in versions before that, TABLE must be written instead.
- **Create a STable**
@@ -290,7 +293,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That
Modify a tag name of STable. After modifying, all sub-tables under the STable will automatically update the new tag name.
- **Modify a tag value of sub-table**
-
+
```mysql
ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
```
@@ -306,7 +309,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That
Insert a record into table tb_name.
- **Insert a record with data corresponding to a given column**
-
+
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...);
```
@@ -320,14 +323,14 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That
Insert multiple records into table tb_name.
- **Insert multiple records into a given column**
-
+
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
Insert multiple records into a given column of table tb_name.
- **Insert multiple records into multiple tables**
-
+
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
@@ -421,7 +424,7 @@ taos> SELECT * FROM d1001;
Query OK, 3 row(s) in set (0.001165s)
```
-For Stables, wildcards contain *tag columns*.
+For STables, wildcards also include *tag columns*.
```mysql
taos> SELECT * FROM meters;
@@ -720,7 +723,7 @@ TDengine supports aggregations over data, they are listed below:
================================================
9 | 9 |
Query OK, 1 row(s) in set (0.004475s)
-
+
taos> SELECT COUNT(*), COUNT(voltage) FROM d1001;
count(*) | count(voltage) |
================================================
@@ -758,7 +761,7 @@ TDengine supports aggregations over data, they are listed below:
```
- **TWA**
-
+
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
@@ -799,7 +802,7 @@ TDengine supports aggregations over data, they are listed below:
================================================================================
35.200000763 | 658 | 0.950000018 |
Query OK, 1 row(s) in set (0.000980s)
- ```
+ ```
- **STDDEV**
@@ -896,7 +899,7 @@ TDengine supports aggregations over data, they are listed below:
======================================
13.40000 | 223 |
Query OK, 1 row(s) in set (0.001123s)
-
+
taos> SELECT MAX(current), MAX(voltage) FROM d1001;
max(current) | max(voltage) |
======================================
@@ -937,8 +940,6 @@ TDengine supports aggregations over data, they are listed below:
Query OK, 1 row(s) in set (0.001023s)
```
--
-
- **LAST**
```mysql
@@ -972,7 +973,7 @@ TDengine supports aggregations over data, they are listed below:
```
- **TOP**
-
+
```mysql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
@@ -1029,7 +1030,7 @@ TDengine supports aggregations over data, they are listed below:
2018-10-03 14:38:15.000 | 218 |
2018-10-03 14:38:16.650 | 218 |
Query OK, 2 row(s) in set (0.001332s)
-
+
taos> SELECT BOTTOM(current, 2) FROM d1001;
ts | bottom(current, 2) |
=================================================
@@ -1092,7 +1093,7 @@ TDengine supports aggregations over data, they are listed below:
=======================
12.30000 |
Query OK, 1 row(s) in set (0.001238s)
-
+
taos> SELECT LAST_ROW(current) FROM d1002;
last_row(current) |
=======================
@@ -1146,7 +1147,7 @@ TDengine supports aggregations over data, they are listed below:
============================
5.000000000 |
Query OK, 1 row(s) in set (0.001792s)
-
+
taos> SELECT SPREAD(voltage) FROM d1001;
spread(voltage) |
============================
@@ -1172,7 +1173,7 @@ TDengine supports aggregations over data, they are listed below:
## Time-dimension Aggregation
-TDengine supports aggregating by intervals. Data in a table can partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sample operation, and the syntax is as follows:
+TDengine supports aggregating by intervals (time ranges). Data in a table can be partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down-sampling, and the syntax is as follows:
```mysql
SELECT function_list FROM tb_name
@@ -1235,11 +1236,11 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
**Restrictions on group by**
-TAOS SQL supports group by operation on tags, tbnames and ordinary columns, required that only one column and whichhas less than 100,000 unique values.
+TAOS SQL supports group by operations on tags, tbname and ordinary columns, with the restriction that only one column can be used and it must have fewer than 100,000 unique values.
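+
+For example, a sketch assuming an STable `meters` that has a tag `location`:
+
+```mysql
+SELECT COUNT(*) FROM meters GROUP BY location;
+```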
**Restrictions on join operation**
-TAOS SQL supports join columns of two tables by Primary Key timestamp between them, and does not support four operations after tables aggregated for the time being.
+TAOS SQL supports joining two tables on their Primary Key timestamps; for the time being, it does not support the four arithmetic operations on tables after aggregation.
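+
+For example, a sketch joining two sub-tables on their timestamps (the table names are illustrative):
+
+```mysql
+SELECT a.ts, a.current, b.voltage FROM d1001 a, d1002 b WHERE a.ts = b.ts;
+```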
**Availability of is no null**
diff --git a/minidevops/README.MD b/minidevops/README.MD
index 9937ad04ad91a4aacdf41f75294052af5024e4cb..dcc8c2fa960191dd756b3acc76489a2147b5b12b 100644
--- a/minidevops/README.MD
+++ b/minidevops/README.MD
@@ -218,8 +218,4 @@ use telegraf;
使用telegraf这个数据库。然后执行show tables,describe table等命令详细查询下telegraf这个库里保存了些什么数据。
具体TDengine的查询语句可以参考[TDengine官方文档](https://www.taosdata.com/cn/documentation/taos-sql/)
## 接入多个监控对象
-<<<<<<< HEAD
就像前面原理介绍的,这个miniDevops的小系统,已经提供了一个时序数据库和可视化系统,对于多台机器的监控,只需要将每台机器的telegraf或prometheus配置按上面所述修改,就可以完成监控数据采集和可视化呈现了。
-=======
-就像前面原理介绍的,这个miniDevops的小系统,已经提供了一个时序数据库和可视化系统,对于多台机器的监控,只需要将每台机器的telegraf或prometheus配置按上面所述修改,就可以完成监控数据采集和可视化呈现了。
->>>>>>> 740f82af58c4ecc2deecfa36fb1de4ef5ee55efc
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 3ae4e9941e96abb4c93b99ae86c40b3e3583bd08..310369aa14ad5e9e6ccb49843605a92fdc333563 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -194,6 +194,9 @@ keepColumnName 1
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
+# the database name must be specified in the restful interface if the following parameter is set, off by default
+# httpDbNameMandatory 1
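+#
+# for example (hypothetical database name db1), when httpDbNameMandatory is set,
+# a RESTful request must carry the database name in the URL:
+#   curl -u root:taosdata -d 'select * from tb1' localhost:6041/rest/sql/db1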
+
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
@@ -284,3 +287,5 @@ keepColumnName 1
# 0 no query allowed, queries are disabled
# queryBufferSize -1
+# percentage of redundant data in tsdb meta that triggers compaction of the meta data, 0 means do not compact
+# tsdbMetaCompactRatio 0
diff --git a/packaging/release.sh b/packaging/release.sh
index 5ba6c01a0bd5689278bdb5c86b538b3c447f086a..44887c6cf749ecfecdef46799311de38dbbbed23 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -22,7 +22,7 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
pagMode=full # [full | lite]
soMode=dynamic # [static | dynamic]
-dbName=taos # [taos | power | tq]
+dbName=taos # [taos | power | tq | pro]
allocator=glibc # [glibc | jemalloc]
verNumber=""
verNumberComp="1.0.0.0"
@@ -78,7 +78,7 @@ do
echo " -l [full | lite] "
echo " -a [glibc | jemalloc] "
echo " -s [static | dynamic] "
- echo " -d [taos | power | tq ] "
+ echo " -d [taos | power | tq | pro] "
echo " -n [version number] "
echo " -m [compatible version number] "
exit 0
@@ -253,6 +253,10 @@ if [ "$osType" != "Darwin" ]; then
${csudo} ./makepkg_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
${csudo} ./makearbi_tq.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
+ elif [[ "$dbName" == "pro" ]]; then
+ ${csudo} ./makepkg_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
+ ${csudo} ./makeclient_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
+ ${csudo} ./makearbi_pro.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName} ${verNumberComp}
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
@@ -262,4 +266,3 @@ else
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
fi
-
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index e116d72d2649940f9d272b8d3d01e34576a4049d..9c6a6e62f5b5fda1cfbaf1b5fff9593a5e349271 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -102,6 +102,12 @@ elif echo $osinfo | grep -qwi "centos" ; then
elif echo $osinfo | grep -qwi "fedora" ; then
# echo "This is fedora system"
os_type=2
+elif echo $osinfo | grep -qwi "Linx" ; then
+# echo "This is Linx system"
+ os_type=1
+ service_mod=0
+ initd_mod=0
+ service_config_dir="/etc/systemd/system"
else
echo " osinfo: ${osinfo}"
echo " This is an officially unverified linux system,"
diff --git a/packaging/tools/install_arbi_pro.sh b/packaging/tools/install_arbi_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..11165dbdd8bdf6afb4659250499cf1d9184c2395
--- /dev/null
+++ b/packaging/tools/install_arbi_pro.sh
@@ -0,0 +1,293 @@
+#!/bin/bash
+#
+# This file is used to install database on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+
+bin_link_dir="/usr/bin"
+#inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+
+# old bin dir
+bin_dir="/usr/local/tarbitrator/bin"
+
+service_config_dir="/etc/systemd/system"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+ echo " This is an officially unverified linux system,"
+ echo " if there are any problems with the installation and operation, "
+ echo " please feel free to contact hanatech.com.cn for support."
+ os_type=1
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ #${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/remove_arbi_prodb.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_prodb.sh ${bin_link_dir}/rmtarbitrator || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function clean_service_on_sysvinit() {
+ #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+ #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install prodbs service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ ${csudo} systemctl enable tarbitratord
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+    # must manually stop tarbitrator
+ kill_tarbitrator
+ fi
+}
+
+function update_prodb() {
+ # Start to update
+ echo -e "${GREEN}Start to update ProDB's arbitrator ...${NC}"
+ # Stop the service if running
+ if pidof tarbitrator &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop tarbitratord || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service tarbitratord stop || :
+ else
+ kill_tarbitrator
+ fi
+ sleep 1
+ fi
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+
+ echo
+ #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
+ fi
+ echo
+ echo -e "\033[44;32;1mProDB's arbitrator is updated successfully!${NC}"
+}
+
+function install_prodb() {
+ # Start to install
+ echo -e "${GREEN}Start to install ProDB's arbitrator ...${NC}"
+
+ install_main_path
+ #install_header
+ install_bin
+ install_service
+ echo
+ #echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/taos/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
+ fi
+
+ echo -e "\033[44;32;1mProDB's arbitrator is installed successfully!${NC}"
+ echo
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the arbitrator
+if [ -x ${bin_dir}/tarbitrator ]; then
+ update_flag=1
+ update_prodb
+else
+ install_prodb
+fi
+
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index aa09013e538253b8740a0aaf70d04358320a6dd8..3df7013b197baaf4d78bb0f0ae5d507d6be92715 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -128,8 +128,12 @@ function install_lib() {
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
- ${csudo} ldconfig
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ldconfig
+ else
+ ${csudo} update_dyld_shared_cache
+ fi
}
function install_header() {
diff --git a/packaging/tools/install_client_pro.sh b/packaging/tools/install_client_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fff8ae31200669ee3ab918a873e33fc32ece37c8
--- /dev/null
+++ b/packaging/tools/install_client_pro.sh
@@ -0,0 +1,248 @@
+#!/bin/bash
+#
+# This file is used to install ProDB client on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+# -----------------------Variables definition---------------------
+
+osType=Linux
+pagMode=full
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir=$(dirname $(readlink -f "$0"))
+ # Dynamic directory
+ data_dir="/var/lib/ProDB"
+ log_dir="/var/log/ProDB"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ data_dir="/var/lib/ProDB"
+ log_dir="~/ProDB/log"
+fi
+
+log_link_dir="/usr/local/ProDB/log"
+
+cfg_install_dir="/etc/ProDB"
+
+if [ "$osType" != "Darwin" ]; then
+ bin_link_dir="/usr/bin"
+ lib_link_dir="/usr/lib"
+ lib64_link_dir="/usr/lib64"
+ inc_link_dir="/usr/include"
+else
+ bin_link_dir="/usr/local/bin"
+ lib_link_dir="/usr/local/lib"
+ inc_link_dir="/usr/local/include"
+fi
+
+#install main path
+install_main_dir="/usr/local/ProDB"
+
+# old bin dir
+bin_dir="/usr/local/ProDB/bin"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+function kill_client() {
+ pid=$(ps -ef | grep "prodbc" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/prodbc || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${bin_link_dir}/prodemo || :
+ ${csudo} rm -f ${bin_link_dir}/prodump || :
+ fi
+ ${csudo} rm -f ${bin_link_dir}/rmprodb || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/prodbc ] && ${csudo} ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || :
+ if [ "$osType" != "Darwin" ]; then
+ [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || :
+ [ -x ${install_main_dir}/bin/prodump ] && ${csudo} ln -s ${install_main_dir}/bin/prodump ${bin_link_dir}/prodump || :
+ fi
+ [ -x ${install_main_dir}/bin/remove_client_prodb.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_prodb.sh ${bin_link_dir}/rmprodb || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ sudo rm -f /usr/lib/libtaos.* || :
+ sudo rm -rf ${lib_dir} || :
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ #${csudo} rm -rf ${v15_java_app_dir} || :
+
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [ -d "${lib64_link_dir}" ]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+ else
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
+ fi
+
+ ${csudo} ldconfig
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_config() {
+ #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
+
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ else
+ mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+ fi
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function update_prodb() {
+ # Start to update
+ if [ ! -e prodb.tar.gz ]; then
+ echo "File prodb.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf prodb.tar.gz
+
+ echo -e "${GREEN}Start to update ProDB client...${NC}"
+ # Stop the client shell if running
+ if pidof prodbc &> /dev/null; then
+ kill_client
+ sleep 1
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mProDB client is updated successfully!${NC}"
+
+ rm -rf $(tar -tf prodb.tar.gz)
+}
+
+function install_prodb() {
+ # Start to install
+ if [ ! -e prodb.tar.gz ]; then
+ echo "File prodb.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf prodb.tar.gz
+
+ echo -e "${GREEN}Start to install ProDB client...${NC}"
+
+ install_main_path
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mProDB client is installed successfully!${NC}"
+
+ rm -rf $(tar -tf prodb.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+# Install or update the client
+# if the server is already installed, don't install the client
+ if [ -e ${bin_dir}/prodbs ]; then
+   echo -e "\033[44;32;1mProDB server is already installed; there is no need to install the client!${NC}"
+ exit 0
+ fi
+
+ if [ -x ${bin_dir}/prodbc ]; then
+ update_flag=1
+ update_prodb
+ else
+ install_prodb
+ fi
diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..564561441646d4bd27f22c5abd9250a9c3377002
--- /dev/null
+++ b/packaging/tools/install_pro.sh
@@ -0,0 +1,948 @@
+#!/bin/bash
+#
+# This file is used to install database on linux systems. The operating system
+# is required to use systemd to manage services at boot
+
+set -e
+#set -x
+
+verMode=edge
+pagMode=full
+
+iplist=""
+serverFqdn=""
+# -----------------------Variables definition---------------------
+script_dir=$(dirname $(readlink -f "$0"))
+# Dynamic directory
+data_dir="/var/lib/ProDB"
+log_dir="/var/log/ProDB"
+
+data_link_dir="/usr/local/ProDB/data"
+log_link_dir="/usr/local/ProDB/log"
+
+cfg_install_dir="/etc/ProDB"
+
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+#install main path
+install_main_dir="/usr/local/ProDB"
+
+# old bin dir
+bin_dir="/usr/local/ProDB/bin"
+
+service_config_dir="/etc/systemd/system"
+nginx_port=6060
+nginx_dir="/usr/local/nginxd"
+
+# Color setting
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+GREEN_DARK='\033[0;32m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+update_flag=0
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+
+# get the operating system type for using the corresponding init file
+# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
+#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
+else
+ osinfo=""
+fi
+#echo "osinfo: ${osinfo}"
+os_type=0
+if echo $osinfo | grep -qwi "ubuntu" ; then
+# echo "This is ubuntu system"
+ os_type=1
+elif echo $osinfo | grep -qwi "debian" ; then
+# echo "This is debian system"
+ os_type=1
+elif echo $osinfo | grep -qwi "Kylin" ; then
+# echo "This is Kylin system"
+ os_type=1
+elif echo $osinfo | grep -qwi "centos" ; then
+# echo "This is centos system"
+ os_type=2
+elif echo $osinfo | grep -qwi "fedora" ; then
+# echo "This is fedora system"
+ os_type=2
+else
+ echo " osinfo: ${osinfo}"
+  echo " This is an officially unverified Linux system;"
+  echo " if you encounter any problems with installation or operation,"
+  echo " please feel free to contact hanatech.com.cn for support."
+ os_type=1
+fi
+
+
+# ============================= get input parameters =================================================
+
+# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
+
+# set parameters by default value
+interactiveFqdn=yes # [yes | no]
+verType=server # [server | client]
+initType=systemd # [systemd | service | ...]
+
+while getopts "hv:e:i:" arg
+do
+ case $arg in
+ e)
+ #echo "interactiveFqdn=$OPTARG"
+ interactiveFqdn=$( echo $OPTARG )
+ ;;
+ v)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
+ i)
+ #echo "initType=$OPTARG"
+ initType=$(echo $OPTARG)
+ ;;
+ h)
+ echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
+ exit 0
+ ;;
+    ?) #unknown option
+      echo "unknown argument"
+ exit 1
+ ;;
+ esac
+done
+
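+# kill_process: force-kill every process whose command line matches $1
+# (fallback when no service manager is available).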
+function kill_process() {
+ pid=$(ps -ef | grep "$1" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function install_main_path() {
+ #create install main dir and all sub dir
+ ${csudo} rm -rf ${install_main_dir} || :
+ ${csudo} mkdir -p ${install_main_dir}
+ ${csudo} mkdir -p ${install_main_dir}/cfg
+ ${csudo} mkdir -p ${install_main_dir}/bin
+ ${csudo} mkdir -p ${install_main_dir}/connector
+ ${csudo} mkdir -p ${install_main_dir}/driver
+ ${csudo} mkdir -p ${install_main_dir}/examples
+ ${csudo} mkdir -p ${install_main_dir}/include
+ ${csudo} mkdir -p ${install_main_dir}/init.d
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} mkdir -p ${nginx_dir}
+ fi
+}
+
+function install_bin() {
+ # Remove links
+ ${csudo} rm -f ${bin_link_dir}/prodbc || :
+ ${csudo} rm -f ${bin_link_dir}/prodbs || :
+ ${csudo} rm -f ${bin_link_dir}/prodemo || :
+ ${csudo} rm -f ${bin_link_dir}/rmprodb || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+
+ ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
+
+ #Make link
+ [ -x ${install_main_dir}/bin/prodbc ] && ${csudo} ln -s ${install_main_dir}/bin/prodbc ${bin_link_dir}/prodbc || :
+ [ -x ${install_main_dir}/bin/prodbs ] && ${csudo} ln -s ${install_main_dir}/bin/prodbs ${bin_link_dir}/prodbs || :
+ [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || :
+ [ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || :
+ [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
+ [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
+
+ if [ "$verMode" == "cluster" ]; then
+ ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
+ ${csudo} mkdir -p ${nginx_dir}/logs
+ ${csudo} chmod 777 ${nginx_dir}/sbin/nginx
+ fi
+}
+
+function install_lib() {
+ # Remove links
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+ ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
+ ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+ ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+ ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ fi
+
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ldconfig
+ else
+ ${csudo} update_dyld_shared_cache
+ fi
+}
+
+function install_header() {
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+}
+
+function install_jemalloc() {
+ jemalloc_dir=${script_dir}/jemalloc
+
+ if [ -d ${jemalloc_dir} ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/bin
+
+ if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/bin/jeprof ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin
+ fi
+ if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
+ ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
+ ${csudo} /usr/bin/install -c -d /usr/local/lib
+ if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ fi
+ if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ fi
+ fi
+ if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ fi
+ if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then
+ ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
+ ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ fi
+
+ if [ -d /etc/ld.so.conf.d ]; then
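+      # NOTE: with plain '${csudo} echo ... > file' only echo would run under sudo,
+      # not the redirection itself, so pipe through tee instead.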
+      echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf > /dev/null
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
+ fi
+}
+
+function add_newHostname_to_hosts() {
+ localIp="127.0.0.1"
+ OLD_IFS="$IFS"
+ IFS=" "
+ iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
+ arr=($iphost)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$localIp" ]]; then
+ return
+ fi
+ done
+  echo "127.0.0.1 $1" | ${csudo} tee -a /etc/hosts > /dev/null ||:
+}
+
+function set_hostname() {
+  echo -e -n "${GREEN}Please enter a hostname (must not be 'localhost')${NC}:"
+ read newHostname
+ while true; do
+ if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then
+ break
+ else
+      read -p "Please enter a hostname (must not be 'localhost'):" newHostname
+ fi
+ done
+
+ ${csudo} hostname $newHostname ||:
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ echo
+    echo "failed to set hostname!"
+ return
+ fi
+
+ #ubuntu/centos /etc/hostname
+ if [[ -e /etc/hostname ]]; then
+    echo $newHostname | ${csudo} tee /etc/hostname > /dev/null ||:
+ fi
+
+  #centos/redhat: #HOSTNAME=yourname in /etc/sysconfig/network
+ if [[ -e /etc/sysconfig/network ]]; then
+ ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||:
+ fi
+
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$newHostname
+
+ if [[ -e /etc/hosts ]]; then
+ add_newHostname_to_hosts $newHostname
+ fi
+}
+
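+# is_correct_ipaddr: return 0 only if $1 is one of the local IP addresses
+# already collected in $iplist (membership check, not format validation).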
+function is_correct_ipaddr() {
+ newIp=$1
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($iplist)
+ IFS="$OLD_IFS"
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == "$newIp" ]]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+function set_ipAsFqdn() {
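+  # Collect the host's non-loopback IPv4 addresses, preferring ip(8) and
+  # falling back to ifconfig on systems without it.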
+ iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||:
+ if [ -z "$iplist" ]; then
+ iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||:
+ fi
+
+ if [ -z "$iplist" ]; then
+ echo
+    echo -e -n "${GREEN}Unable to get local IP, using 127.0.0.1${NC}"
+ localFqdn="127.0.0.1"
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ echo
+ return
+ fi
+
+ echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:"
+ echo
+ echo -e -n "${GREEN}$iplist${NC}"
+ echo
+ echo
+  echo -e -n "${GREEN}Note: if an IP address is used as the node name, data can NOT be migrated to another machine directly${NC}:"
+ read localFqdn
+ while true; do
+ if [ ! -z "$localFqdn" ]; then
+ # Check if correct ip address
+ is_correct_ipaddr $localFqdn
+ retval=`echo $?`
+ if [[ $retval != 0 ]]; then
+ read -p "Please choose an IP from local IP list:" localFqdn
+ else
+ # Write the local FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg
+ serverFqdn=$localFqdn
+ break
+ fi
+ else
+ read -p "Please choose an IP from local IP list:" localFqdn
+ fi
+ done
+}
+
+function local_fqdn_check() {
+ #serverFqdn=$(hostname)
+ echo
+ echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}"
+ echo
+ if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then
+ echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}"
+ echo
+
+ while true
+ do
+ read -r -p "Set hostname now? [Y/n] " input
+      if [ -z "$input" ]; then
+ set_hostname
+ break
+ else
+ case $input in
+ [yY][eE][sS]|[yY])
+ set_hostname
+ break
+ ;;
+
+ [nN][oO]|[nN])
+ set_ipAsFqdn
+ break
+ ;;
+
+ *)
+ echo "Invalid input..."
+ ;;
+ esac
+ fi
+ done
+ fi
+}
+
+function install_config() {
+ if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
+ ${csudo} mkdir -p ${cfg_install_dir}
+ [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
+ ${csudo} chmod 644 ${cfg_install_dir}/*
+ fi
+
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+  [ ! -z "$1" ] && return 0 || : # only install client
+
+ if ((${update_flag}==1)); then
+ return 0
+ fi
+
+ if [ "$interactiveFqdn" == "no" ]; then
+ return 0
+ fi
+
+ local_fqdn_check
+
+ #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
+ #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
+ #PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
+ #FQDN_PATTERN=":[0-9]{1,5}$"
+
+  # first fully qualified domain name (FQDN) for the ProDB cluster system
+ echo
+ echo -e -n "${GREEN}Enter FQDN:port (like h1.hanatech.com.cn:6030) of an existing ProDB cluster node to join${NC}"
+ echo
+ echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
+ read firstEp
+ while true; do
+ if [ ! -z "$firstEp" ]; then
+ # check the format of the firstEp
+ #if [[ $firstEp == $FQDN_PATTERN ]]; then
+ # Write the first FQDN to configuration file
+ ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
+ break
+ #else
+ # read -p "Please enter the correct FQDN:port: " firstEp
+ #fi
+ else
+ break
+ fi
+ done
+}
+
+
+function install_log() {
+ ${csudo} rm -rf ${log_dir} || :
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
+
+ ${csudo} ln -s ${log_dir} ${install_main_dir}/log
+}
+
+function install_data() {
+ ${csudo} mkdir -p ${data_dir}
+
+ ${csudo} ln -s ${data_dir} ${install_main_dir}/data
+}
+
+function install_connector() {
+ ${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
+}
+
+function install_examples() {
+ if [ -d ${script_dir}/examples ]; then
+ ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof prodbs &> /dev/null; then
+ ${csudo} service prodbs stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} chkconfig --del prodbs || :
+ fi
+
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} insserv -r prodbs || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} update-rc.d -f prodbs remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/prodbs || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+ if $(which init &> /dev/null); then
+ ${csudo} init q || :
+ fi
+}
+
+function install_service_on_sysvinit() {
+ clean_service_on_sysvinit
+ sleep 1
+
+ # Install prodbs service
+
+ if ((${os_type}==1)); then
+ ${csudo} cp -f ${script_dir}/init.d/prodbs.deb ${install_main_dir}/init.d/prodbs
+ ${csudo} cp ${script_dir}/init.d/prodbs.deb ${service_config_dir}/prodbs && ${csudo} chmod a+x ${service_config_dir}/prodbs
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ elif ((${os_type}==2)); then
+ ${csudo} cp -f ${script_dir}/init.d/prodbs.rpm ${install_main_dir}/init.d/prodbs
+ ${csudo} cp ${script_dir}/init.d/prodbs.rpm ${service_config_dir}/prodbs && ${csudo} chmod a+x ${service_config_dir}/prodbs
+ ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
+ ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
+ fi
+
+ if ((${initd_mod}==1)); then
+ ${csudo} chkconfig --add prodbs || :
+ ${csudo} chkconfig --level 2345 prodbs on || :
+ ${csudo} chkconfig --add tarbitratord || :
+ ${csudo} chkconfig --level 2345 tarbitratord on || :
+ elif ((${initd_mod}==2)); then
+ ${csudo} insserv prodbs || :
+ ${csudo} insserv -d prodbs || :
+ ${csudo} insserv tarbitratord || :
+ ${csudo} insserv -d tarbitratord || :
+ elif ((${initd_mod}==3)); then
+ ${csudo} update-rc.d prodbs defaults || :
+ ${csudo} update-rc.d tarbitratord defaults || :
+ fi
+}
+
+function clean_service_on_systemd() {
+ prodbs_service_config="${service_config_dir}/prodbs.service"
+ if systemctl is-active --quiet prodbs; then
+ echo "ProDB is running, stopping it..."
+ ${csudo} systemctl stop prodbs &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable prodbs &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${prodbs_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ if systemctl is-active --quiet tarbitratord; then
+ echo "tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ if systemctl is-active --quiet nginxd; then
+ echo "Nginx for ProDB is running, stopping it..."
+ ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+}
+
+function install_service_on_systemd() {
+ clean_service_on_systemd
+
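+  # Write the unit file line by line; 'bash -c' is needed so the >> redirection
+  # itself runs with sudo.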
+ prodbs_service_config="${service_config_dir}/prodbs.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'Description=ProDB server service' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/prodbs' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'ExecStartPre=/usr/local/ProDB/bin/startPre.sh' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${prodbs_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${prodbs_service_config}"
+ ${csudo} systemctl enable prodbs
+
+ tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Description=ProDB arbitrator service' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
+ #${csudo} systemctl enable tarbitratord
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/nginxd.service"
+ ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
+    ${csudo} bash -c "echo 'Description=Nginx for ProDB service' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo >> ${nginx_service_config}"
+ ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
+ ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
+ if ! ${csudo} systemctl enable nginxd &> /dev/null; then
+ ${csudo} systemctl daemon-reexec
+ ${csudo} systemctl enable nginxd
+ fi
+ ${csudo} systemctl start nginxd
+ fi
+}
+
+function install_service() {
+ if ((${service_mod}==0)); then
+ install_service_on_systemd
+ elif ((${service_mod}==1)); then
+ install_service_on_sysvinit
+ else
+    # must manually stop prodbs
+ kill_process prodbs
+ fi
+}
+
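+# vercomp: numerically compare two dotted version strings, field by field.
+# Returns 0 if $1 == $2, 1 if $1 > $2, 2 if $1 < $2.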
+vercomp () {
+ if [[ $1 == $2 ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i ver1=($1) ver2=($2)
+ # fill empty fields in ver1 with zeros
+ for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
+ ver1[i]=0
+ done
+
+ for ((i=0; i<${#ver1[@]}; i++)); do
+ if [[ -z ${ver2[i]} ]]
+ then
+ # fill empty fields in ver2 with zeros
+ ver2[i]=0
+ fi
+ if ((10#${ver1[i]} > 10#${ver2[i]}))
+ then
+ return 1
+ fi
+ if ((10#${ver1[i]} < 10#${ver2[i]}))
+ then
+ return 2
+ fi
+ done
+ return 0
+}
+
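+# is_version_compatible: fail only when the packaged driver version is older
+# than the minimum compatible version shipped in vercomp.txt (or reported by prodbs -V).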
+function is_version_compatible() {
+ curr_version=`ls ${script_dir}/driver/libtaos.so* |cut -d '.' -f 3-6`
+
+ if [ -f ${script_dir}/driver/vercomp.txt ]; then
+ min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
+ else
+ min_compatible_version=$(${script_dir}/bin/prodbs -V | head -1 | cut -d ' ' -f 5)
+ fi
+
+ vercomp $curr_version $min_compatible_version
+ case $? in
+ 0) return 0;;
+ 1) return 0;;
+ 2) return 1;;
+ esac
+}
+
+function update_prodb() {
+ # Start to update
+ if [ ! -e prodb.tar.gz ]; then
+ echo "File prodb.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf prodb.tar.gz
+ install_jemalloc
+
+  # Check if the version is compatible
+ if ! is_version_compatible; then
+ echo -e "${RED}Version incompatible${NC}"
+ return 1
+ fi
+
+ echo -e "${GREEN}Start to update ProDB...${NC}"
+ # Stop the service if running
+ if pidof prodbs &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop prodbs || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service prodbs stop || :
+ else
+ kill_process prodbs
+ fi
+ sleep 1
+ fi
+ if [ "$verMode" == "cluster" ]; then
+ if pidof nginx &> /dev/null; then
+ if ((${service_mod}==0)); then
+ ${csudo} systemctl stop nginxd || :
+ elif ((${service_mod}==1)); then
+ ${csudo} service nginxd stop || :
+ else
+ kill_process nginx
+ fi
+ sleep 1
+ fi
+ fi
+
+ install_main_path
+
+ install_log
+ install_header
+ install_lib
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+  if [ -z "$1" ]; then
+ install_bin
+ install_service
+ install_config
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if openresty is installed
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for ProDB is updated successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for ProDB does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ #echo
+ #echo -e "\033[44;32;1mProDB is updated successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} service prodbs start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: ./prodbs${NC}"
+ fi
+
+ if [ ${openresty_work} = 'true' ]; then
+ echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ else
+ echo -e "${GREEN_DARK}To access ProDB ${NC}: use ${GREEN_UNDERLINE}prodbc -h $serverFqdn${NC} in shell${NC}"
+ fi
+
+ echo
+ echo -e "\033[44;32;1mProDB is updated successfully!${NC}"
+ else
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mProDB client is updated successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf prodb.tar.gz)
+}
+
+function install_prodb() {
+ # Start to install
+ if [ ! -e prodb.tar.gz ]; then
+ echo "File prodb.tar.gz does not exist"
+ exit 1
+ fi
+ tar -zxf prodb.tar.gz
+
+ echo -e "${GREEN}Start to install ProDB...${NC}"
+
+ install_main_path
+
+  if [ -z "$1" ]; then
+ install_data
+ fi
+
+ install_log
+ install_header
+ install_lib
+ install_jemalloc
+ if [ "$pagMode" != "lite" ]; then
+ install_connector
+ fi
+ install_examples
+
+  if [ -z "$1" ]; then # install service and client
+    # For a fresh install
+ install_bin
+ install_service
+
+ openresty_work=false
+ if [ "$verMode" == "cluster" ]; then
+ # Check if nginx is installed successfully
+ if type curl &> /dev/null; then
+ if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
+ echo -e "\033[44;32;1mNginx for ProDB is installed successfully!${NC}"
+ openresty_work=true
+ else
+ echo -e "\033[44;31;5mNginx for ProDB does not work! Please try again!\033[0m"
+ fi
+ fi
+ fi
+
+ install_config
+
+    # Ask whether to start the service
+ #echo
+ #echo -e "\033[44;32;1mProDB is installed successfully!${NC}"
+ echo
+ echo -e "${GREEN_DARK}To configure ProDB ${NC}: edit /etc/ProDB/taos.cfg"
+ if ((${service_mod}==0)); then
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} systemctl start prodbs${NC}"
+ elif ((${service_mod}==1)); then
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: ${csudo} service prodbs start${NC}"
+ else
+ echo -e "${GREEN_DARK}To start ProDB ${NC}: prodbs${NC}"
+ fi
+
+ if [ ! -z "$firstEp" ]; then
+ tmpFqdn=${firstEp%%:*}
+ substr=":"
+ if [[ $firstEp =~ $substr ]];then
+ tmpPort=${firstEp#*:}
+ else
+ tmpPort=""
+ fi
+ if [[ "$tmpPort" != "" ]];then
+      echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn -P $tmpPort${GREEN_DARK} to log in to the cluster, then${NC}"
+    else
+      echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $tmpFqdn${GREEN_DARK} to log in to the cluster, then${NC}"
+ fi
+ echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
+ echo
+ elif [ ! -z "$serverFqdn" ]; then
+    echo -e "${GREEN_DARK}To access ProDB ${NC}: prodbc -h $serverFqdn${GREEN_DARK} to log in to the ProDB server${NC}"
+ echo
+ fi
+ echo -e "\033[44;32;1mProDB is installed successfully!${NC}"
+ echo
+ else # Only install client
+ install_bin
+ install_config
+
+ echo
+ echo -e "\033[44;32;1mProDB client is installed successfully!${NC}"
+ fi
+
+ rm -rf $(tar -tf prodb.tar.gz)
+}
+
+
+## ==============================Main program starts from here============================
+serverFqdn=$(hostname)
+if [ "$verType" == "server" ]; then
+ # Install server and client
+ if [ -x ${bin_dir}/prodbs ]; then
+ update_flag=1
+ update_prodb
+ else
+ install_prodb
+ fi
+elif [ "$verType" == "client" ]; then
+ interactiveFqdn=no
+ # Only install client
+ if [ -x ${bin_dir}/prodbc ]; then
+ update_flag=1
+ update_prodb client
+ else
+ install_prodb client
+ fi
+else
+  echo "please input a correct verType: server or client"
+fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 7851587c826c1667386d6dc9c91f1eef748927db..d400d0b91a2d02e9b3e0232d67e2ed6b00cdf541 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -19,35 +19,35 @@ else
fi
# Dynamic directory
-data_dir="/var/lib/taos"
if [ "$osType" != "Darwin" ]; then
+ data_dir="/var/lib/taos"
log_dir="/var/log/taos"
-else
- log_dir=~/TDengine/log
-fi
-
-data_link_dir="/usr/local/taos/data"
-log_link_dir="/usr/local/taos/log"
-cfg_install_dir="/etc/taos"
+ cfg_install_dir="/etc/taos"
-if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
+
+ install_main_dir="/usr/local/taos"
+
+ bin_dir="/usr/local/taos/bin"
else
+ data_dir="/usr/local/var/lib/taos"
+ log_dir="/usr/local/var/log/taos"
+
+ cfg_install_dir="/usr/local/etc/taos"
+
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
-fi
-#install main path
-install_main_dir="/usr/local/taos"
+ install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
-# old bin dir
-bin_dir="/usr/local/taos/bin"
+ bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin"
+fi
service_config_dir="/etc/systemd/system"
@@ -59,12 +59,11 @@ GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
csudo=""
-if command -v sudo > /dev/null; then
- csudo="sudo"
-fi
if [ "$osType" != "Darwin" ]; then
-
+ if command -v sudo > /dev/null; then
+ csudo="sudo"
+ fi
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
@@ -137,17 +136,17 @@ function install_main_path() {
function install_bin() {
# Remove links
- ${csudo} rm -f ${bin_link_dir}/taos || :
+ ${csudo} rm -f ${bin_link_dir}/taos || :
+ ${csudo} rm -f ${bin_link_dir}/taosd || :
+ ${csudo} rm -f ${bin_link_dir}/taosdemo || :
+ ${csudo} rm -f ${bin_link_dir}/taosdump || :
if [ "$osType" != "Darwin" ]; then
- ${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/taosdemo || :
- ${csudo} rm -f ${bin_link_dir}/taosdump || :
+ ${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/set_core || :
+ ${csudo} rm -f ${bin_link_dir}/rmtaos || :
fi
-
- ${csudo} rm -f ${bin_link_dir}/rmtaos || :
-
+
${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
@@ -161,19 +160,18 @@ function install_bin() {
${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
- [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
+ [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
+ [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
+ [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
+ [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
if [ "$osType" != "Darwin" ]; then
- [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
- [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
- [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
fi
-
+
if [ "$osType" != "Darwin" ]; then
- [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
- else
- [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
+ [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
fi
}
@@ -220,7 +218,7 @@ function install_jemalloc() {
fi
if [ -d /etc/ld.so.conf.d ]; then
- ${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
@@ -245,11 +243,12 @@ function install_lib() {
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
- ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
- ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
+ ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
+
+ ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
-
+
install_jemalloc
if [ "$osType" != "Darwin" ]; then
@@ -259,10 +258,14 @@ function install_lib() {
function install_header() {
- ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
+ fi
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
- ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
- ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
+ ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
+ fi
}
function install_config() {
@@ -270,23 +273,20 @@ function install_config() {
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
+ [ -f ${script_dir}/../cfg/taos.cfg ] &&
+ ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+
+ if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
+ fi
}
function install_log() {
${csudo} rm -rf ${log_dir} || :
-
- if [ "$osType" != "Darwin" ]; then
- ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
- else
- mkdir -p ${log_dir} && chmod 777 ${log_dir}
- fi
-
+ ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
@@ -307,7 +307,6 @@ function install_connector() {
echo "WARNING: go connector not found, please check if want to use it!"
fi
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
-
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
}
@@ -487,24 +486,21 @@ function install_TDengine() {
else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
fi
-
+
install_main_path
- if [ "$osType" != "Darwin" ]; then
- install_data
- fi
+ install_data
install_log
install_header
install_lib
install_connector
install_examples
-
install_bin
-
+
if [ "$osType" != "Darwin" ]; then
install_service
fi
-
+
install_config
if [ "$osType" != "Darwin" ]; then
diff --git a/packaging/tools/makearbi_pro.sh b/packaging/tools/makearbi_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6ce3765e44acc408ced9730c54b793338eb37b38
--- /dev/null
+++ b/packaging/tools/makearbi_pro.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Generate arbitrator's tar.gz setup package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/ProDB-enterprise-arbitrator-${version}"
+else
+ install_dir="${release_dir}/ProDB-arbitrator-${version}"
+fi
+
+# Directories and files.
+bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_pro.sh"
+install_files="${script_dir}/install_arbi_pro.sh"
+
+#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
+init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
+
+# make directories.
+mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_pro.sh || :
+#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
+mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
+mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+exitcode=0
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..599c91fbf082955887c677b750aa12f946c0890b
--- /dev/null
+++ b/packaging/tools/makeclient_pro.sh
@@ -0,0 +1,225 @@
+#!/bin/bash
+#
+# Generate tar.gz client package for all OS systems
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+
+if [ "$osType" != "Darwin" ]; then
+ script_dir="$(dirname $(readlink -f $0))"
+ top_dir="$(readlink -f ${script_dir}/../..)"
+else
+ script_dir=`dirname $0`
+ cd ${script_dir}
+ script_dir="$(pwd)"
+ top_dir=${script_dir}/../..
+fi
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/ProDB-enterprise-client-${version}"
+else
+ install_dir="${release_dir}/ProDB-client-${version}"
+fi
+
+# Directories and files.
+
+if [ "$osType" != "Darwin" ]; then
+ lib_files="${build_dir}/lib/libtaos.so.${version}"
+else
+ bin_files="${build_dir}/bin/taos ${script_dir}/remove_client_pro.sh"
+ lib_files="${build_dir}/lib/libtaos.${version}.dylib"
+fi
+
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+
+install_files="${script_dir}/install_client_pro.sh"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
+sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
+sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
+
+mkdir -p ${install_dir}/bin
+if [ "$osType" != "Darwin" ]; then
+ if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
+ cp ${script_dir}/remove_pro.sh ${install_dir}/bin
+ else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
+ cp ${script_dir}/remove_pro.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+ fi
+else
+ cp ${bin_files} ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+if [ -f ${build_dir}/bin/jemalloc-config ]; then
+ mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
+ cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin
+ if [ -f ${build_dir}/bin/jemalloc.sh ]; then
+ cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/bin/jeprof ]; then
+ cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin
+ fi
+ if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then
+ cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then
+ cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
+ ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ fi
+ if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
+ cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
+ fi
+ if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then
+ cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc
+ fi
+ if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then
+ cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3
+ fi
+fi
+
+cd ${install_dir}
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f prodb.tar.gz * --remove-files || :
+else
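+  # macOS tar lacks --remove-files, so stash the archive one level up, clear
+  # the directory, then move it back.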
+ tar -zcv -f prodb.tar.gz * || :
+ mv prodb.tar.gz ..
+ rm -rf ./*
+ mv ../prodb.tar.gz .
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$osType" == "Darwin" ]; then
+ sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh
+ mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+ sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_pro.sh >> install_client_prodb_temp.sh
+ mv install_client_prodb_temp.sh ${install_dir}/install_client_pro.sh
+fi
+chmod a+x ${install_dir}/install_client_pro.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
+ sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
+fi
+# Copy driver
+mkdir -p ${install_dir}/driver
+cp ${lib_files} ${install_dir}/driver
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ if [ "$osType" != "Darwin" ]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+ fi
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+    echo "WARNING: go connector not found, please check if you want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector
+ mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
+ sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
+fi
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+if [ "$osType" != "Darwin" ]; then
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
+else
+ tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
+ mv "$(basename ${pkg_name}).tar.gz" ..
+ rm -rf ./*
+ mv ../"$(basename ${pkg_name}).tar.gz" .
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ffe4566b42017a7bffa6166ae28e18ca29bd03cd
--- /dev/null
+++ b/packaging/tools/makepkg_pro.sh
@@ -0,0 +1,193 @@
+#!/bin/bash
+#
+# Generate tar.gz package for all OS systems
+
+set -e
+#set -x
+
+curr_dir=$(pwd)
+compile_dir=$1
+version=$2
+build_time=$3
+cpuType=$4
+osType=$5
+verMode=$6
+verType=$7
+pagMode=$8
+versionComp=$9
+
+script_dir="$(dirname $(readlink -f $0))"
+top_dir="$(readlink -f ${script_dir}/../..)"
+
+# create compressed install file.
+build_dir="${compile_dir}/build"
+code_dir="${top_dir}/src"
+release_dir="${top_dir}/release"
+
+#package_name='linux'
+if [ "$verMode" == "cluster" ]; then
+ install_dir="${release_dir}/ProDB-enterprise-server-${version}"
+else
+ install_dir="${release_dir}/ProDB-server-${version}"
+fi
+
+lib_files="${build_dir}/lib/libtaos.so.${version}"
+header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
+if [ "$verMode" == "cluster" ]; then
+ cfg_dir="${top_dir}/../enterprise/packaging/cfg"
+else
+ cfg_dir="${top_dir}/packaging/cfg"
+fi
+install_files="${script_dir}/install_pro.sh"
+nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
+
+# make directories.
+mkdir -p ${install_dir}
+mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
+mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
+
+#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
+mkdir -p ${install_dir}/bin
+if [ "$pagMode" == "lite" ]; then
+ strip ${build_dir}/bin/taosd
+ strip ${build_dir}/bin/taos
+ cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs
+ cp ${script_dir}/remove_pro.sh ${install_dir}/bin
+else
+ cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
+ cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs
+ cp ${script_dir}/remove_pro.sh ${install_dir}/bin
+ cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo
+ cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
+ cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
+ cp ${script_dir}/set_core.sh ${install_dir}/bin
+ cp ${script_dir}/get_client.sh ${install_dir}/bin
+ cp ${script_dir}/startPre.sh ${install_dir}/bin
+ cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
+fi
+chmod a+x ${install_dir}/bin/* || :
+
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_pro.sh >> remove_prodb_temp.sh
+ mv remove_prodb_temp.sh ${install_dir}/bin/remove_pro.sh
+
+ mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
+ cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
+ rm -rf ${install_dir}/nginxd/png
+
+ sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/*.html
+ sed -i "s/TDengine/ProDB/g" ${install_dir}/nginxd/admin/js/*.js
+
+ sed -i '/dataDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
+ sed -i '/logDir/ {s/taos/ProDB/g}' ${install_dir}/cfg/taos.cfg
+ sed -i "s/TDengine/ProDB/g" ${install_dir}/cfg/taos.cfg
+
+ if [ "$cpuType" == "aarch64" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
+ elif [ "$cpuType" == "aarch32" ]; then
+ cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
+ fi
+ rm -rf ${install_dir}/nginxd/sbin/arm
+fi
+
+cd ${install_dir}
+exitcode=0
+tar -zcv -f prodb.tar.gz * --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar prodb.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
+cp ${install_files} ${install_dir}
+if [ "$verMode" == "cluster" ]; then
+ sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
+ mv install_prodb_temp.sh ${install_dir}/install_pro.sh
+fi
+if [ "$pagMode" == "lite" ]; then
+  sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_pro.sh >> install_prodb_temp.sh
+ mv install_prodb_temp.sh ${install_dir}/install_pro.sh
+fi
+chmod a+x ${install_dir}/install_pro.sh
+
+# Copy example code
+mkdir -p ${install_dir}/examples
+examples_dir="${top_dir}/tests/examples"
+cp -r ${examples_dir}/c ${install_dir}/examples
+sed -i '/passwd/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/c/*.c
+
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp -r ${examples_dir}/JDBC ${install_dir}/examples
+ cp -r ${examples_dir}/matlab ${install_dir}/examples
+ mv ${install_dir}/examples/matlab/TDengineDemo.m ${install_dir}/examples/matlab/ProDBDemo.m
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/matlab/ProDBDemo.m
+ cp -r ${examples_dir}/python ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/python/read_example.py
+ cp -r ${examples_dir}/R ${install_dir}/examples
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/examples/R/command.txt
+ cp -r ${examples_dir}/go ${install_dir}/examples
+ mv ${install_dir}/examples/go/taosdemo.go ${install_dir}/examples/go/prodemo.go
+ sed -i '/root/ {s/taosdata/prodb/g}' ${install_dir}/examples/go/prodemo.go
+fi
+# Copy driver
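+# vercomp.txt records the minimum compatible driver version; install_pro.sh
+# reads it in is_version_compatible before allowing an update.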
+mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" > ${install_dir}/driver/vercomp.txt
+
+# Copy connector
+connector_dir="${code_dir}/connector"
+mkdir -p ${install_dir}/connector
+if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
+ cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
+
+ if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
+ cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
+ else
+    echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
+ fi
+ if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
+ cp -r ${connector_dir}/go ${install_dir}/connector
+ else
+    echo "WARNING: go connector not found, please check if you want to use it!"
+ fi
+ cp -r ${connector_dir}/python ${install_dir}/connector/
+ mv ${install_dir}/connector/python/taos ${install_dir}/connector/python/prodb
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/cinterface.py
+
+ sed -i '/password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/subscription.py
+
+ sed -i '/self._password/ {s/taosdata/prodb/g}' ${install_dir}/connector/python/prodb/connection.py
+fi
+
+cd ${release_dir}
+
+if [ "$verMode" == "cluster" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+elif [ "$verMode" == "edge" ]; then
+ pkg_name=${install_dir}-${osType}-${cpuType}
+else
+  echo "unknown verMode, neither cluster nor edge"
+ exit 1
+fi
+
+if [ "$pagMode" == "lite" ]; then
+ pkg_name=${pkg_name}-Lite
+fi
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=${pkg_name}-${verType}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+  echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+exitcode=0
+tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || exitcode=$?
+if [ "$exitcode" != "0" ]; then
+ echo "tar ${pkg_name}.tar.gz error !!!"
+ exit $exitcode
+fi
+
+cd ${curr_dir}
diff --git a/packaging/tools/remove_arbi_pro.sh b/packaging/tools/remove_arbi_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ff10478881628bdaf027c618a1b89f204ebbdb35
--- /dev/null
+++ b/packaging/tools/remove_arbi_pro.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall ProDB's arbitrator
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/tarbitrator"
+bin_link_dir="/usr/bin"
+
+service_config_dir="/etc/systemd/system"
+tarbitrator_service_name="tarbitratord"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif $(which service &> /dev/null); then
+ service_mod=1
+ service_config_dir="/etc/init.d"
+ if $(which chkconfig &> /dev/null); then
+ initd_mod=1
+ elif $(which insserv &> /dev/null); then
+ initd_mod=2
+ elif $(which update-rc.d &> /dev/null); then
+ initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf /arbitrator.log || :
+}
+
+function clean_service_on_systemd() {
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "ProDB tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${tarbitratord_service_config}
+}
+
+function clean_service_on_sysvinit() {
+ if pidof tarbitrator &> /dev/null; then
+ echo "ProDB's tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+  if which init &> /dev/null; then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # must stop tarbitrator manually
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header files (disabled: inc_link_dir is not defined in this script)
+##clean_header
+# Remove log file
+clean_log
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}ProDB's arbitrator is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_client_pro.sh b/packaging/tools/remove_client_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..59e4e8997620af035821df5a975fe58f1357c9dc
--- /dev/null
+++ b/packaging/tools/remove_client_pro.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Script to stop the client and uninstall the ProDB client, but retain the config and log files.
+set -e
+# set -x
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/ProDB"
+
+log_link_dir="/usr/local/ProDB/log"
+cfg_link_dir="/usr/local/ProDB/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
+function kill_client() {
+  pid=$(pidof prodbc)
+  if [ -n "$pid" ]; then
+    ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/prodbc || :
+ ${csudo} rm -f ${bin_link_dir}/prodemo || :
+ ${csudo} rm -f ${bin_link_dir}/prodump || :
+ ${csudo} rm -f ${bin_link_dir}/rmprodb || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+# Stop client.
+kill_client
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+
+${csudo} rm -rf ${install_main_dir}
+
+echo -e "${GREEN}ProDB client is removed successfully!${NC}"
+echo
diff --git a/packaging/tools/remove_pro.sh b/packaging/tools/remove_pro.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f6dad22bc21b02a9d717d530c50bc19c5a718478
--- /dev/null
+++ b/packaging/tools/remove_pro.sh
@@ -0,0 +1,210 @@
+#!/bin/bash
+#
+# Script to stop the service and uninstall ProDB, but retain the config, data and log files.
+
+set -e
+#set -x
+
+verMode=edge
+
+RED='\033[0;31m'
+GREEN='\033[1;32m'
+NC='\033[0m'
+
+#install main path
+install_main_dir="/usr/local/ProDB"
+data_link_dir="/usr/local/ProDB/data"
+log_link_dir="/usr/local/ProDB/log"
+cfg_link_dir="/usr/local/ProDB/cfg"
+bin_link_dir="/usr/bin"
+lib_link_dir="/usr/lib"
+lib64_link_dir="/usr/lib64"
+inc_link_dir="/usr/include"
+install_nginxd_dir="/usr/local/nginxd"
+
+service_config_dir="/etc/systemd/system"
+prodb_service_name="prodbs"
+tarbitrator_service_name="tarbitratord"
+nginx_service_name="nginxd"
+csudo=""
+if command -v sudo > /dev/null; then
+ csudo="sudo"
+fi
+
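+# detect the init system: service_mod 0 = systemd, 1 = SysV init (initd_mod selects
+# chkconfig/insserv/update-rc.d), 2 = none, so the processes must be killed manually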
+initd_mod=0
+service_mod=2
+if pidof systemd &> /dev/null; then
+ service_mod=0
+elif which service &> /dev/null; then
+  service_mod=1
+  service_config_dir="/etc/init.d"
+  if which chkconfig &> /dev/null; then
+    initd_mod=1
+  elif which insserv &> /dev/null; then
+    initd_mod=2
+  elif which update-rc.d &> /dev/null; then
+    initd_mod=3
+ else
+ service_mod=2
+ fi
+else
+ service_mod=2
+fi
+
+function kill_prodbs() {
+ pid=$(ps -ef | grep "prodbs" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function kill_tarbitrator() {
+ pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
+ if [ -n "$pid" ]; then
+ ${csudo} kill -9 $pid || :
+ fi
+}
+
+function clean_bin() {
+ # Remove link
+ ${csudo} rm -f ${bin_link_dir}/prodbc || :
+ ${csudo} rm -f ${bin_link_dir}/prodbs || :
+ ${csudo} rm -f ${bin_link_dir}/prodemo || :
+ ${csudo} rm -f ${bin_link_dir}/prodump || :
+ ${csudo} rm -f ${bin_link_dir}/rmprodb || :
+ ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
+ ${csudo} rm -f ${bin_link_dir}/set_core || :
+}
+
+function clean_lib() {
+ # Remove link
+ ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
+}
+
+function clean_header() {
+ # Remove link
+ ${csudo} rm -f ${inc_link_dir}/taos.h || :
+ ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
+}
+
+function clean_config() {
+ # Remove link
+ ${csudo} rm -f ${cfg_link_dir}/* || :
+}
+
+function clean_log() {
+ # Remove link
+ ${csudo} rm -rf ${log_link_dir} || :
+}
+
+function clean_service_on_systemd() {
+ prodb_service_config="${service_config_dir}/${prodb_service_name}.service"
+ if systemctl is-active --quiet ${prodb_service_name}; then
+ echo "ProDB prodbs is running, stopping it..."
+ ${csudo} systemctl stop ${prodb_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${prodb_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${prodb_service_config}
+
+ tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
+ if systemctl is-active --quiet ${tarbitrator_service_name}; then
+ echo "ProDB tarbitrator is running, stopping it..."
+ ${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
+ ${csudo} rm -f ${tarbitratord_service_config}
+
+ if [ "$verMode" == "cluster" ]; then
+ nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
+    if [ -d ${install_nginxd_dir} ]; then  # bin_dir was never defined; check the nginx install dir instead
+ if systemctl is-active --quiet ${nginx_service_name}; then
+ echo "Nginx for ProDB is running, stopping it..."
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
+ fi
+ ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
+
+ ${csudo} rm -f ${nginx_service_config}
+ fi
+ fi
+}
+
+function clean_service_on_sysvinit() {
+ if pidof prodbs &> /dev/null; then
+ echo "ProDB prodbs is running, stopping it..."
+ ${csudo} service prodbs stop || :
+ fi
+
+ if pidof tarbitrator &> /dev/null; then
+ echo "ProDB tarbitrator is running, stopping it..."
+ ${csudo} service tarbitratord stop || :
+ fi
+
+ if ((${initd_mod}==1)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} chkconfig --del prodbs || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} chkconfig --del tarbitratord || :
+ fi
+ elif ((${initd_mod}==2)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} insserv -r prodbs || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} insserv -r tarbitratord || :
+ fi
+ elif ((${initd_mod}==3)); then
+ if [ -e ${service_config_dir}/prodbs ]; then
+ ${csudo} update-rc.d -f prodbs remove || :
+ fi
+ if [ -e ${service_config_dir}/tarbitratord ]; then
+ ${csudo} update-rc.d -f tarbitratord remove || :
+ fi
+ fi
+
+ ${csudo} rm -f ${service_config_dir}/prodbs || :
+ ${csudo} rm -f ${service_config_dir}/tarbitratord || :
+
+  if which init &> /dev/null; then
+ ${csudo} init q || :
+ fi
+}
+
+function clean_service() {
+ if ((${service_mod}==0)); then
+ clean_service_on_systemd
+ elif ((${service_mod}==1)); then
+ clean_service_on_sysvinit
+ else
+    # must stop prodbs and tarbitrator manually
+ kill_prodbs
+ kill_tarbitrator
+ fi
+}
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove link log directory
+clean_log
+# Remove link configuration file
+clean_config
+# Remove data link directory
+${csudo} rm -rf ${data_link_dir} || :
+
+${csudo} rm -rf ${install_main_dir}
+${csudo} rm -rf ${install_nginxd_dir}
+if [[ -e /etc/os-release ]]; then
+ osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+else
+ osinfo=""
+fi
+
+echo -e "${GREEN}ProDB is removed successfully!${NC}"
+echo
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index c04fa3298bd3acdee8fb88fb4f2eead2632a441f..ea5ce3bc52468d7efcc1ece78f46cbbc8c2c3a7e 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core18
-version: '2.1.6.0'
+version: '2.1.7.2'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.1.6.0
+ - usr/lib/libtaos.so.2.1.7.2
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c
index 7d94df1c23ab7824dbada0423beec14530a2101c..04a14357c9e602807f5aa254d8a5ea25bc4b328d 100644
--- a/src/balance/src/bnScore.c
+++ b/src/balance/src/bnScore.c
@@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
- tsBnDnodes.maxSize = dnodesNum * 2;
- tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
+ int32_t maxSize = dnodesNum * 2;
+ SDnodeObj** list1 = NULL;
+ int32_t retry = 0;
+
+ while(list1 == NULL && retry++ < 3) {
+ list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
+ }
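+  // if realloc still failed after the retries, keep the old list and maxSize so the
+  // existing dnode entries remain valid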
+ if(list1) {
+ tsBnDnodes.list = list1;
+ tsBnDnodes.maxSize = maxSize;
+ }
}
}
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index 2f83557d63c288173f8b541caf32fe9fe2e73338..cf53977103c3a9760286e70447d826f7026d7e53 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -4,22 +4,25 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(jni)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt ${VAR_TSZ})
+ TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m rt cJson ${VAR_TSZ})
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.so)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt)
+ TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m rt cJson)
IF (TD_LINUX_64)
- TARGET_LINK_LIBRARIES(taos lua)
+ TARGET_LINK_LIBRARIES(taos lua cJson)
ENDIF ()
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
@@ -34,16 +37,17 @@ IF (TD_LINUX)
ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua)
+ TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m lua cJson)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua)
+ TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m lua cJson)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
@@ -57,30 +61,32 @@ ELSEIF (TD_DARWIN)
ELSEIF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
CONFIGURE_FILE("${TD_COMMUNITY_DIR}/src/client/src/taos.rc.in" "${TD_COMMUNITY_DIR}/src/client/src/taos.rc")
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static trpc tutil query)
+ TARGET_LINK_LIBRARIES(taos_static trpc tutil query cJson)
# generate dynamic library (*.dll)
ADD_LIBRARY(taos SHARED ${SRC} ${TD_COMMUNITY_DIR}/src/client/src/taos.rc)
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
- TARGET_LINK_LIBRARIES(taos trpc tutil query lua)
+ TARGET_LINK_LIBRARIES(taos trpc tutil query lua cJson)
ELSEIF (TD_DARWIN)
SET(CMAKE_MACOSX_RPATH 1)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
ADD_LIBRARY(taos_static STATIC ${SRC})
- TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua)
+ TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m lua cJson)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
- TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua)
+ TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m lua cJson)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
diff --git a/src/client/inc/tscParseLine.h b/src/client/inc/tscParseLine.h
new file mode 100644
index 0000000000000000000000000000000000000000..401dcafdfbefd28e79ebdf30d810e194564a5056
--- /dev/null
+++ b/src/client/inc/tscParseLine.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TSCPARSELINE_H
+#define TDENGINE_TSCPARSELINE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ char* key;
+ uint8_t type;
+ int16_t length;
+ char* value;
+} TAOS_SML_KV;
+
+typedef struct {
+ char* stableName;
+
+ char* childTableName;
+ TAOS_SML_KV* tags;
+ int32_t tagNum;
+
+ // first kv must be timestamp
+ TAOS_SML_KV* fields;
+ int32_t fieldNum;
+} TAOS_SML_DATA_POINT;
+
+typedef enum {
+ SML_TIME_STAMP_NOW,
+ SML_TIME_STAMP_SECONDS,
+ SML_TIME_STAMP_MILLI_SECONDS,
+ SML_TIME_STAMP_MICRO_SECONDS,
+ SML_TIME_STAMP_NANO_SECONDS
+} SMLTimeStampType;
+
+typedef struct {
+ uint64_t id;
+ SHashObj* smlDataToSchema;
+} SSmlLinesInfo;
+
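+// helpers for schemaless ("line protocol") inserts: key/table-name validation, value and
+// timestamp conversion, and insertion of the parsed data points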
+int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info);
+bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info);
+int32_t isValidChildTableName(const char *pTbName, int16_t len);
+
+bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info);
+int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info);
+
+void destroySmlDataPoint(TAOS_SML_DATA_POINT* point);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_TSCPARSELINE_H
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index f0349c2b3dc5b03b44afdca682f314709ecf6886..a012ca5a7fe741b8859465504cbc971a7e46952c 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy);
int tsInsertInitialCheck(SSqlObj *pSql);
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
+
+void tscFreeRetrieveSup(SSqlObj *pSql);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index f2d25c1e84419ae4124bba2631918e46856ef487..c858bd5867c64da4c7397aed2035119ff414d112 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -29,15 +29,16 @@ extern "C" {
#include "tsched.h"
#include "tsclient.h"
-#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
+#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))
+
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
-
-#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
+
+#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
-#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
+#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
#pragma pack(push,1)
@@ -91,7 +92,7 @@ typedef struct SMergeTsCtx {
}SMergeTsCtx;
typedef struct SVgroupTableInfo {
- SVgroupInfo vgInfo;
+ SVgroupMsg vgInfo;
  SArray *itemList; // SArray<STableIdInfo>
} SVgroupTableInfo;
@@ -143,6 +144,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
@@ -172,7 +174,9 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
-int tscAllocPayload(SSqlCmd* pCmd, int size);
+// the memory is not reset in case of fast allocate payload function
+int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size);
+int32_t tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
@@ -189,6 +193,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArray* pExprList);
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
+int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo);
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid);
@@ -213,6 +218,7 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function
int16_t size);
size_t tscNumOfExprs(SQueryInfo* pQueryInfo);
+int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo);
SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy);
@@ -221,7 +227,7 @@ void tscExprDestroy(SArray* pExprInfo);
int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, SExprInfo*** pExpr, int32_t* num);
-void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta);
+void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id);
SColumn* tscColumnClone(const SColumn* src);
void tscColumnCopy(SColumn* pDest, const SColumn* pSrc);
@@ -284,7 +290,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
+
+#if 0
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
+#endif
+
/**
* The create object function must be successful expect for the out of memory issue.
*
@@ -314,13 +324,14 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlC
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
+int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries);
void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);
-void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta);
+void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
@@ -359,7 +370,9 @@ bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);
-void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id);
+void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);
+
+char* cloneCurrentDBName(SSqlObj* pSql);
#ifdef __cplusplus
}
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 8d579b375a1ceb1b70a27692d24ec4404a574071..dd4ff7eb57f20cfc8d31328630fbb14b7acf7017 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -38,6 +38,11 @@ extern "C" {
#include "qUtil.h"
#include "tcmdtype.h"
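+// origin of a request: from the interactive shell/client or from the embedded HTTP server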
+typedef enum {
+ TAOS_REQ_FROM_SHELL,
+ TAOS_REQ_FROM_HTTP
+} SReqOrigin;
+
// forward declaration
struct SSqlInfo;
@@ -123,17 +128,15 @@ typedef struct {
int32_t kvLen; // len of SKVRow
} SMemRowInfo;
typedef struct {
- uint8_t memRowType;
- uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need
- TDRowTLenT dataRowInitLen;
+ uint8_t memRowType; // default is 0, that is SDataRow
+ uint8_t compareStat; // 0 no need, 1 need compare
TDRowTLenT kvRowInitLen;
SMemRowInfo *rowInfo;
} SMemRowBuilder;
typedef enum {
- ROW_COMPARE_UNKNOWN = 0,
+ ROW_COMPARE_NO_NEED = 0,
ROW_COMPARE_NEED = 1,
- ROW_COMPARE_NO_NEED = 2,
} ERowCompareStat;
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
@@ -231,7 +234,6 @@ typedef struct STableDataBlocks {
typedef struct {
STableMeta *pTableMeta;
SArray *vgroupIdList;
-// SVgroupsInfo *pVgroupsInfo;
} STableMetaVgroupInfo;
typedef struct SInsertStatementParam {
@@ -283,20 +285,14 @@ typedef struct {
int32_t resColumnId;
} SSqlCmd;
-typedef struct SResRec {
- int numOfRows;
- int numOfTotal;
-} SResRec;
-
typedef struct {
int32_t numOfRows; // num of results in current retrieval
- int64_t numOfRowsGroup; // num of results of current group
int64_t numOfTotal; // num of total results
int64_t numOfClauseTotal; // num of total result in current subclause
char * pRsp;
int32_t rspType;
int32_t rspLen;
- uint64_t qId;
+ uint64_t qId; // query id of SQInfo
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
@@ -304,8 +300,6 @@ typedef struct {
int16_t precision;
bool completed;
int32_t code;
- int32_t numOfGroups;
- SResRec * pGroupRec;
char * data;
TAOS_ROW tsrow;
TAOS_ROW urow;
@@ -313,8 +307,7 @@ typedef struct {
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex* pColumnIndex;
- TAOS_FIELD* final;
- SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
+ TAOS_FIELD* final;
struct SGlobalMerger *pMerger;
} SSqlRes;
@@ -342,6 +335,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj
+ SReqOrigin from;
} STscObj;
typedef struct SSubqueryState {
@@ -373,7 +367,6 @@ typedef struct SSqlObj {
tsem_t rspSem;
SSqlCmd cmd;
SSqlRes res;
- bool isBind;
SSubqueryState subState;
struct SSqlObj **pSubs;
@@ -447,7 +440,7 @@ void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBloc
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam);
-void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
+void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta, uint64_t id);
/**
* free query result of the sql object
@@ -488,6 +481,7 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql);
int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
@@ -573,7 +567,7 @@ static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSc
SKVRow kvRow = memRowKvBody(dest);
memRowSetType(dest, SMEM_ROW_KV);
- memRowSetKvVersion(kvRow, dataRowVersion(dataRow));
+ memRowSetKvVersion(dest, dataRowVersion(dataRow));
kvRowSetNCols(kvRow, nBoundCols);
kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols));
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index 7181c658ddcdfde3efe7df3c0784c20f18bd4c03..61ae5082f31cd9129a3cec1eaa1e0552ada7993b 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -41,6 +41,14 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions
(JNIEnv *, jclass, jint, jstring);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setConfigImp
+ * Signature: (Ljava/lang/String;)Lcom/taosdata/jdbc/TSDBException;
+ */
+JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp
+ (JNIEnv *, jclass, jstring);
+
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: getTsCharset
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 7ba613de88f2d358e4a359cfa5fb0d5f32a1071e..925b7d75db9f88c9905270aa365c60990e9f45a3 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -20,12 +20,42 @@
#include "com_taosdata_jdbc_TSDBJNIConnector.h"
-#define jniFatal(...) { if (jniDebugFlag & DEBUG_FATAL) { taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniError(...) { if (jniDebugFlag & DEBUG_ERROR) { taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniWarn(...) { if (jniDebugFlag & DEBUG_WARN) { taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniInfo(...) { if (jniDebugFlag & DEBUG_INFO) { taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }}
-#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }}
-#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }}
+#define jniFatal(...) \
+ { \
+ if (jniDebugFlag & DEBUG_FATAL) { \
+ taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniError(...) \
+ { \
+ if (jniDebugFlag & DEBUG_ERROR) { \
+ taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniWarn(...) \
+ { \
+ if (jniDebugFlag & DEBUG_WARN) { \
+ taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniInfo(...) \
+ { \
+ if (jniDebugFlag & DEBUG_INFO) { \
+ taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniDebug(...) \
+ { \
+ if (jniDebugFlag & DEBUG_DEBUG) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniTrace(...) \
+ { \
+ if (jniDebugFlag & DEBUG_TRACE) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
int __init = 0;
@@ -60,14 +90,14 @@ jmethodID g_blockdataSetByteArrayFp;
jmethodID g_blockdataSetNumOfRowsFp;
jmethodID g_blockdataSetNumOfColsFp;
-#define JNI_SUCCESS 0
-#define JNI_TDENGINE_ERROR -1
+#define JNI_SUCCESS 0
+#define JNI_TDENGINE_ERROR -1
#define JNI_CONNECTION_NULL -2
#define JNI_RESULT_SET_NULL -3
#define JNI_NUM_OF_FIELDS_0 -4
-#define JNI_SQL_NULL -5
-#define JNI_FETCH_END -6
-#define JNI_OUT_OF_MEMORY -7
+#define JNI_SQL_NULL -5
+#define JNI_FETCH_END -6
+#define JNI_OUT_OF_MEMORY -7
static void jniGetGlobalMethod(JNIEnv *env) {
// make sure init function executed once
@@ -129,13 +159,13 @@ static void jniGetGlobalMethod(JNIEnv *env) {
}
static int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
- if ((TAOS*) conn == NULL) {
+ if ((TAOS *)conn == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
- if ((TAOS_RES *) res == NULL) {
- jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn);
+ if ((TAOS_RES *)res == NULL) {
+ jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS *)conn);
return JNI_RESULT_SET_NULL;
}
@@ -170,6 +200,64 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e
jniDebug("jni initialized successfully, config directory: %s", configDir);
}
+JNIEXPORT jobject createTSDBException(JNIEnv *env, int code, char *msg) {
+ // find class
+ jclass exception_clazz = (*env)->FindClass(env, "com/taosdata/jdbc/TSDBException");
+ // find methods
+  jmethodID init_method = (*env)->GetMethodID(env, exception_clazz, "<init>", "()V");
+ jmethodID setCode_method = (*env)->GetMethodID(env, exception_clazz, "setCode", "(I)V");
+ jmethodID setMessage_method = (*env)->GetMethodID(env, exception_clazz, "setMessage", "(Ljava/lang/String;)V");
+ // new exception
+ jobject exception_obj = (*env)->NewObject(env, exception_clazz, init_method);
+ // set code
+ (*env)->CallVoidMethod(env, exception_obj, setCode_method, code);
+ // set message
+ jstring message = (*env)->NewStringUTF(env, msg);
+ (*env)->CallVoidMethod(env, exception_obj, setMessage_method, message);
+
+ return exception_obj;
+}
+
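+// JDBC side (assumed): setConfigImp returns this TSDBException object so the Java layer can
+// inspect code/message itself instead of an exception being thrown across the JNI boundary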
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: setConfigImp
+ * Signature: (Ljava/lang/String;)Lcom/taosdata/jdbc/TSDBException;
+ */
+JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp(JNIEnv *env, jclass jobj,
+ jstring config) {
+ if (config == NULL) {
+ char *msg = "config value is null";
+ jniDebug("config value is null");
+ return createTSDBException(env, -1, msg);
+ }
+
+ const char *cfg = (*env)->GetStringUTFChars(env, config, NULL);
+ if (!cfg) {
+    char *msg = "failed to read config value";
+    jniDebug("failed to read config value");
+ return createTSDBException(env, -1, msg);
+ }
+
+  setConfRet result = taos_set_config(cfg);
+  (*env)->ReleaseStringUTFChars(env, config, cfg);  // release the JNI string to avoid a leak
+  int   code = result.retCode;
+  char *msg = result.retMsg;
+
+ return createTSDBException(env, code, msg);
+}
+
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex,
jstring optionValue) {
if (optionValue == NULL) {
@@ -216,7 +304,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost,
jint jport, jstring jdbName, jstring juser,
jstring jpass) {
- jlong ret = 0;
+ jlong ret = 0;
const char *host = NULL;
const char *user = NULL;
const char *pass = NULL;
@@ -246,7 +334,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
jniDebug("jobj:%p, pass not specified, use default password", jobj);
}
- ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
+ ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
@@ -289,7 +377,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
jsize len = (*env)->GetArrayLength(env, jsql);
- char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ char *str = (char *)calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
@@ -315,16 +403,17 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
}
free(str);
- return (jlong) pSql;
+ return (jlong)pSql;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con,
+ jlong tres) {
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
- return (jint)taos_errno((TAOS_RES*) tres);
+ return (jint)taos_errno((TAOS_RES *)tres);
}
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) {
@@ -334,7 +423,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
@@ -359,7 +448,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(
SSqlObj *pSql = (TAOS_RES *)tres;
- return (tscIsUpdateQuery(pSql)? 1:0);
+ return (tscIsUpdateQuery(pSql) ? 1 : 0);
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con,
@@ -370,21 +459,22 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(
}
taos_free_result((void *)res);
- jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res);
+ jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS *)con, (void *)res);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
jint ret = taos_affected_rows((SSqlObj *)res);
- jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret);
+ jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res,
+ (int32_t)ret);
return ret;
}
@@ -392,13 +482,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp(JNIEnv *env, jobject jobj,
jlong con, jlong res,
jobject arrayListObj) {
- TAOS *tscon = (TAOS *)con;
+ TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
- TAOS_RES* tres = (TAOS_RES*) res;
+ TAOS_RES * tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t num_fields = taos_num_fields(tres);
@@ -452,7 +542,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
int32_t numOfFields = taos_num_fields(result);
if (numOfFields == 0) {
- jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields);
+ jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void *)res, numOfFields);
return JNI_NUM_OF_FIELDS_0;
}
@@ -460,7 +550,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
if (row == NULL) {
int code = taos_errno(result);
if (code == TSDB_CODE_SUCCESS) {
- jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields);
+ jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void *)res,
+ numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon);
@@ -468,7 +559,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
}
- int32_t* length = taos_fetch_lengths(result);
+ int32_t *length = taos_fetch_lengths(result);
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
@@ -533,7 +624,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con,
- jlong res, jobject rowobj) {
+ jlong res, jobject rowobj) {
TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
@@ -564,8 +655,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields);
for (int i = 0; i < numOfFields; i++) {
- (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows,
- jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes * numOfRows));
+ int bytes = fields[i].bytes;
+
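+    // BINARY/NCHAR values in a fetched block carry a 2-byte length prefix per row, so widen
+    // the per-value size before copying the column to the JVM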
+ if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_NCHAR) {
+ bytes += 2;
+ }
+ (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, bytes * numOfRows,
+ jniFromNCharToByteArray(env, (char *)row[i], bytes * numOfRows));
}
return JNI_SUCCESS;
@@ -585,7 +681,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con,
- jboolean restart, jstring jtopic, jstring jsql, jint jinterval) {
+ jboolean restart, jstring jtopic,
+ jstring jsql, jint jinterval) {
jlong sub = 0;
TAOS *taos = (TAOS *)con;
char *topic = NULL;
@@ -682,8 +779,8 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(J
* @param res the TAOS_RES object, i.e. the SSqlObject
* @return precision 0:ms 1:us 2:ns
*/
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj, jlong con,
- jlong res) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj,
+ jlong con, jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
@@ -699,7 +796,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrec
return taos_result_precision(result);
}
-JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) {
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj,
+ jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
@@ -713,7 +811,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J
jsize len = (*env)->GetArrayLength(env, jsql);
- char *str = (char *) calloc(1, sizeof(char) * (len + 1));
+ char *str = (char *)calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
@@ -724,25 +822,27 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J
// todo handle error
}
- TAOS_STMT* pStmt = taos_stmt_init(tscon);
- int32_t code = taos_stmt_prepare(pStmt, str, len);
+ TAOS_STMT *pStmt = taos_stmt_init(tscon);
+ int32_t code = taos_stmt_prepare(pStmt, str, len);
tfree(str);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
return JNI_TDENGINE_ERROR;
}
- return (jlong) pStmt;
+ return (jlong)pStmt;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj,
+ jlong stmt, jstring jname,
+ jlong conn) {
TAOS *tsconn = (TAOS *)conn;
if (tsconn == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
- TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
return JNI_SQL_NULL;
@@ -750,7 +850,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
const char *name = (*env)->GetStringUTFChars(env, jname, NULL);
- int32_t code = taos_stmt_set_tbname((void*)stmt, name);
+ int32_t code = taos_stmt_set_tbname((void *)stmt, name);
if (code != TSDB_CODE_SUCCESS) {
(*env)->ReleaseStringUTFChars(env, jname, name);
@@ -763,8 +863,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
return JNI_SUCCESS;
}
-JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt,
- jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
+ JNIEnv *env, jobject jobj, jlong stmt, jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList,
+ jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
@@ -798,14 +899,14 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J
}
// bind multi-rows with only one invoke.
- TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND));
+ TAOS_MULTI_BIND *b = calloc(1, sizeof(TAOS_MULTI_BIND));
- b->num = numOfRows;
- b->buffer_type = dataType; // todo check data type
- b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes;
- b->is_null = nullArray;
- b->buffer = colBuf;
- b->length = (int32_t*)lengthArray;
+ b->num = numOfRows;
+ b->buffer_type = dataType; // todo check data type
+ b->buffer_length = IS_VAR_DATA_TYPE(dataType) ? dataBytes : tDataTypes[dataType].bytes;
+ b->is_null = nullArray;
+ b->buffer = colBuf;
+ b->length = (int32_t *)lengthArray;
// set the length and is_null array
if (!IS_VAR_DATA_TYPE(dataType)) {
@@ -829,14 +930,15 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J
return JNI_SUCCESS;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt,
+ jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
- TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
@@ -853,14 +955,15 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J
return JNI_SUCCESS;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt,
+ jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
- TAOS_STMT *pStmt = (TAOS_STMT*) stmt;
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon);
return JNI_SQL_NULL;
@@ -876,15 +979,16 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv
return JNI_SUCCESS;
}
-JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(JNIEnv *env, jobject jobj,
- jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList, jbyteArray lengthList, jbyteArray nullList, jlong conn) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(
+ JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList,
+ jbyteArray lengthList, jbyteArray nullList, jlong conn) {
TAOS *tsconn = (TAOS *)conn;
if (tsconn == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
- TAOS_STMT* pStmt = (TAOS_STMT*) stmt;
+ TAOS_STMT *pStmt = (TAOS_STMT *)stmt;
if (pStmt == NULL) {
jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn);
return JNI_SQL_NULL;
@@ -898,39 +1002,39 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
}
len = (*env)->GetArrayLength(env, lengthList);
- int64_t *lengthArray = (int64_t*) calloc(1, len);
- (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray);
+ int64_t *lengthArray = (int64_t *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray);
if ((*env)->ExceptionCheck(env)) {
}
len = (*env)->GetArrayLength(env, typeList);
- char *typeArray = (char*) calloc(1, len);
- (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte*) typeArray);
+ char *typeArray = (char *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray);
if ((*env)->ExceptionCheck(env)) {
}
len = (*env)->GetArrayLength(env, nullList);
- int32_t *nullArray = (int32_t*) calloc(1, len);
- (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray);
+ int32_t *nullArray = (int32_t *)calloc(1, len);
+ (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray);
if ((*env)->ExceptionCheck(env)) {
}
const char *name = (*env)->GetStringUTFChars(env, tableName, NULL);
- char* curTags = tagsData;
+ char * curTags = tagsData;
TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND));
- for(int32_t i = 0; i < numOfTags; ++i) {
+ for (int32_t i = 0; i < numOfTags; ++i) {
tagsBind[i].buffer_type = typeArray[i];
- tagsBind[i].buffer = curTags;
+ tagsBind[i].buffer = curTags;
tagsBind[i].is_null = &nullArray[i];
- tagsBind[i].length = (uintptr_t*) &lengthArray[i];
+ tagsBind[i].length = (uintptr_t *)&lengthArray[i];
curTags += lengthArray[i];
}
- int32_t code = taos_stmt_set_tbname_tags((void*)stmt, name, tagsBind);
+ int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind);
- int32_t nTags = (int32_t) numOfTags;
+ int32_t nTags = (int32_t)numOfTags;
jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags);
tfree(tagsData);
@@ -948,28 +1052,28 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
- jobjectArray lines, jlong conn) {
+ jobjectArray lines, jlong conn) {
TAOS *taos = (TAOS *)conn;
if (taos == NULL) {
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
- int numLines = (*env)->GetArrayLength(env, lines);
- char** c_lines = calloc(numLines, sizeof(char*));
+ int numLines = (*env)->GetArrayLength(env, lines);
+ char **c_lines = calloc(numLines, sizeof(char *));
if (c_lines == NULL) {
jniError("c_lines:%p, alloc memory failed", c_lines);
return JNI_OUT_OF_MEMORY;
}
for (int i = 0; i < numLines; ++i) {
- jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i));
- c_lines[i] = (char*)(*env)->GetStringUTFChars(env, line, 0);
+ jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i));
+ c_lines[i] = (char *)(*env)->GetStringUTFChars(env, line, 0);
}
int code = taos_insert_lines(taos, c_lines, numLines);
for (int i = 0; i < numLines; ++i) {
- jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i));
+ jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i));
(*env)->ReleaseStringUTFChars(env, line, c_lines[i]);
}
diff --git a/src/client/src/taos.def b/src/client/src/taos.def
index 7d3b8e80c20226c4a509c95ab5728f41852110f5..f1ff17a491e795120494b00f59a800aa6bbc889a 100644
--- a/src/client/src/taos.def
+++ b/src/client/src/taos.def
@@ -2,6 +2,7 @@ EXPORTS
taos_init
taos_cleanup
taos_options
+taos_set_config
taos_connect
taos_connect_auth
taos_close
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 174610ec79fee83411e2ee92a79d073894fdb002..4a621d47c0dcae4c2765d53b0d5b650e22d64a58 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -60,17 +60,25 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
pCmd->resColumnId = TSDB_RES_COL_ID;
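+  // hold a reference on the sql object across parsing and execution so asynchronous
+  // callbacks cannot free it from under us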
+ taosAcquireRef(tscObjRef, pSql->self);
+
int32_t code = tsParseSql(pSql, true);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
+
+ if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ taosReleaseRef(tscObjRef, pSql->self);
+ return;
+ }
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscAsyncResultOnError(pSql);
+ taosReleaseRef(tscObjRef, pSql->self);
return;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
executeQuery(pSql, pQueryInfo);
+ taosReleaseRef(tscObjRef, pSql->self);
}
// TODO return the correct error code to client in tscQueueAsyncError
@@ -351,7 +359,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pSql->pStream == NULL) {
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
- if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
+ if (pQueryInfo != NULL && TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
tscDebug("0x%" PRIx64 " continue parse sql after get table-meta", pSql->self);
code = tsParseSql(pSql, false);
@@ -363,15 +371,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- code = tscGetTableMeta(pSql, pTableMetaInfo);
- if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- taosReleaseRef(tscObjRef, pSql->self);
- return;
- } else {
- assert(code == TSDB_CODE_SUCCESS);
- }
-
(*pSql->fp)(pSql->param, pSql, code);
} else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert
tscImportDataFromFile(pSql);
@@ -381,7 +380,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
} else {
if (pSql->retryReason != TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " update cached table-meta, re-validate sql statement and send query again", pSql->self);
- tscResetSqlCmd(pCmd, false);
pSql->retryReason = TSDB_CODE_SUCCESS;
} else {
tscDebug("0x%" PRIx64 " cached table-meta, continue validate sql statement and send query", pSql->self);
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
index e696d54abd91ad45eccae23f2650088dab3c91ce..6acbfe3e8929c9a5a46ed0370f6cfb883988ef3e 100644
--- a/src/client/src/tscGlobalmerge.c
+++ b/src/client/src/tscGlobalmerge.c
@@ -35,6 +35,7 @@ typedef struct SCompareParam {
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
int32_t ret = 0;
+
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
@@ -564,9 +565,11 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc
(*hasPrev) = true;
}
+// the tsdb_func_tag function produces only one row of result; therefore, we need to copy its
+// output value to all rows of the output block
static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t numOfRows) {
if (numOfRows <= 1) {
- return ;
+ return;
}
for (int32_t k = 0; k < numOfOutput; ++k) {
@@ -574,12 +577,49 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
continue;
}
- int32_t inc = numOfRows - 1; // tsdb_func_tag function only produce one row of result
- char* src = pCtx[k].pOutput;
+ char* src = pCtx[k].pOutput;
+ char* dst = pCtx[k].pOutput + pCtx[k].outputBytes;
+
+ // Let's start from the second row, as the first row has result value already.
+ for (int32_t i = 1; i < numOfRows; ++i) {
+ memcpy(dst, src, (size_t)pCtx[k].outputBytes);
+ dst += pCtx[k].outputBytes;
+ }
+ }
+}
+
+static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) {
+ for (int32_t j = 0; j < numOfExpr; ++j) {
+ pCtx[j].pInput = pDataPtr[j] + pCtx[j].inputBytes * rowIndex;
+ }
+
+ for (int32_t j = 0; j < numOfExpr; ++j) {
+ int32_t functionId = pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
+ doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
+ } else {
+ aAggs[functionId].mergeFunc(&pCtx[j]);
+ }
+ }
+}
+
+static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
+ for(int32_t j = 0; j < numOfExpr; ++j) {
+ int32_t functionId = pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
- for (int32_t i = 0; i < inc; ++i) {
- pCtx[k].pOutput += pCtx[k].outputBytes;
- memcpy(pCtx[k].pOutput, src, (size_t)pCtx[k].outputBytes);
+ if (functionId < 0) {
+ SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
+ doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
+ } else {
+ aAggs[functionId].xFinalize(&pCtx[j]);
}
}
}
@@ -588,52 +628,18 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
SMultiwayMergeInfo* pInfo = pOperator->info;
SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
- char** add = calloc(pBlock->info.numOfCols, POINTER_BYTES);
+ char** addrPtr = calloc(pBlock->info.numOfCols, POINTER_BYTES);
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
- add[i] = pCtx[i].pInput;
+ addrPtr[i] = pCtx[i].pInput;
pCtx[i].size = 1;
}
for(int32_t i = 0; i < pBlock->info.rows; ++i) {
if (pInfo->hasPrev) {
if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
- for (int32_t j = 0; j < numOfExpr; ++j) {
- pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
- }
-
- for (int32_t j = 0; j < numOfExpr; ++j) {
- int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- if (functionId < 0) {
- SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
-
- doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
-
- continue;
- }
-
- aAggs[functionId].mergeFunc(&pCtx[j]);
- }
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
} else {
- for(int32_t j = 0; j < numOfExpr; ++j) { // TODO refactor
- int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- if (functionId < 0) {
- SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
-
- doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
-
- continue;
- }
-
- aAggs[functionId].xFinalize(&pCtx[j]);
- }
+ doFinalizeResultImpl(pInfo, pCtx, numOfExpr);
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput);
setTagValueForMultipleRows(pCtx, pOperator->numOfOutput, numOfRows);
@@ -643,7 +649,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
for(int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows);
if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) {
- pCtx[j].ptsOutputBuf = pCtx[0].pOutput;
+ if(j > 0) pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput;
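+        // top/bottom emit a companion timestamp column; point ptsOutputBuf at the preceding
+        // column's output instead of assuming it is always column 0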
}
}
@@ -655,48 +661,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
}
- for (int32_t j = 0; j < numOfExpr; ++j) {
- pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
- }
-
- for (int32_t j = 0; j < numOfExpr; ++j) {
- int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- if (functionId < 0) {
- SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
-
- doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
-
- continue;
- }
-
- aAggs[functionId].mergeFunc(&pCtx[j]);
- }
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
}
} else {
- for (int32_t j = 0; j < numOfExpr; ++j) {
- pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
- }
-
- for (int32_t j = 0; j < numOfExpr; ++j) {
- int32_t functionId = pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- if (functionId < 0) {
- SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
-
- doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
-
- continue;
- }
-
- aAggs[functionId].mergeFunc(&pCtx[j]);
- }
+ doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
}
savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev);
@@ -704,11 +672,11 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
{
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
- pCtx[i].pInput = add[i];
+ pCtx[i].pInput = addrPtr[i];
}
}
- tfree(add);
+ tfree(addrPtr);
}
static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
@@ -816,6 +784,8 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
SLocalDataSource *pOneDataSrc = pMerger->pLocalDataSrc[pTree->pNode[0].index];
bool sameGroup = true;
if (pInfo->hasPrev) {
+
+ // todo refactor extract method
int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
// if this row belongs to current result set group
@@ -955,9 +925,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
break;
}
+ bool sameGroup = true;
if (pAggInfo->hasGroupColData) {
- bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
- if (!sameGroup) {
+ sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
+ if (!sameGroup && !pAggInfo->multiGroupResults) {
*newgroup = true;
pAggInfo->hasDataBlockForNewGroup = true;
pAggInfo->pExistBlock = pBlock;
@@ -976,26 +947,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
}
if (handleData) { // data in current group is all handled
- for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
- int32_t functionId = pAggInfo->binfo.pCtx[j].functionId;
- if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
- continue;
- }
-
- if (functionId < 0) {
- SUdfInfo* pUdfInfo = taosArrayGet(pAggInfo->udfInfo, -1 * functionId - 1);
-
- doInvokeUdf(pUdfInfo, &pAggInfo->binfo.pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
-
- continue;
- }
-
- aAggs[functionId].xFinalize(&pAggInfo->binfo.pCtx[j]);
- }
-
+ doFinalizeResultImpl(pAggInfo, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
- pAggInfo->binfo.pRes->info.rows += numOfRows;
+ pAggInfo->binfo.pRes->info.rows += numOfRows;
setTagValueForMultipleRows(pAggInfo->binfo.pCtx, pOperator->numOfOutput, numOfRows);
}
@@ -1019,71 +974,127 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
return (pRes->info.rows != 0)? pRes:NULL;
}
-static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
- SSLimitOperatorInfo *pInfo = pOperator->info;
- assert(pInfo->currentGroupOffset >= 0);
+static void doHandleDataInCurrentGroup(SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t rowIndex) {
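+ // consume the per-group OFFSET first; once it is exhausted, copy rows into
+ // the result block until the per-group LIMIT (if any) is reached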
+ if (pInfo->currentOffset > 0) {
+ pInfo->currentOffset -= 1;
+ } else {
+ // offset exhausted: keep the row while the per-group limit is not reached, otherwise discard it
+ if (pInfo->limit.limit < 0 || (pInfo->limit.limit >= 0 && pInfo->rowsTotal < pInfo->limit.limit)) {
+ size_t num1 = taosArrayGetSize(pInfo->pRes->pDataBlock);
+ for (int32_t i = 0; i < num1; ++i) {
+ SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+ SColumnInfoData *pDstInfoData = taosArrayGet(pInfo->pRes->pDataBlock, i);
- SSDataBlock* pBlock = NULL;
- if (pInfo->currentGroupOffset == 0) {
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
- if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- }
+ SColumnInfo *pColInfo = &pColInfoData->info;
+
+ char *pSrc = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
+ char *pDst = (char *)pDstInfoData->pData + (pInfo->pRes->info.rows * pColInfo->bytes);
- if (*newgroup == false && pInfo->limit.limit > 0 && pInfo->rowsTotal >= pInfo->limit.limit) {
- while ((*newgroup) == false) { // ignore the remain blocks
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
- if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
- }
+ memcpy(pDst, pSrc, pColInfo->bytes);
}
+
+ pInfo->rowsTotal += 1;
+ pInfo->pRes->info.rows += 1;
}
+ }
+}
+
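+// Grow the result block so it can hold numOfRows more rows. The 0.8 fill
+// factor below sets the threshold at which doSLimit hands the accumulated
+// block back to its caller instead of pulling more data from upstream.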
+static void ensureOutputBuf(SSLimitOperatorInfo * pInfo, SSDataBlock *pResultBlock, int32_t numOfRows) {
+ if (pInfo->capacity < pResultBlock->info.rows + numOfRows) {
+ int32_t total = pResultBlock->info.rows + numOfRows;
+
+ size_t num = taosArrayGetSize(pResultBlock->pDataBlock);
+ for (int32_t i = 0; i < num; ++i) {
+ SColumnInfoData *pInfoData = taosArrayGet(pResultBlock->pDataBlock, i);
+
+ char *tmp = realloc(pInfoData->pData, total * pInfoData->info.bytes);
+ if (tmp != NULL) {
+ pInfoData->pData = tmp;
+ } else {
+ // todo handle the malloc failure
+ }
- return pBlock;
+ pInfo->capacity = total;
+ pInfo->threshold = (int64_t)(total * 0.8);
+ }
}
+}
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+enum {
+ BLOCK_NEW_GROUP = 1,
+ BLOCK_NO_GROUP = 2,
+ BLOCK_SAME_GROUP = 3,
+};
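+
+// Return codes of doSlimitImpl, describing how the input block was consumed:
+// BLOCK_NEW_GROUP - a new group started while the result block already holds
+// rows, so the caller parks the block and returns the current result first;
+// BLOCK_NO_GROUP - the SLIMIT group quota is exhausted, nothing more to read;
+// BLOCK_SAME_GROUP - every row of the block was processed.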
- if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
+static int32_t doSlimitImpl(SOperatorInfo* pOperator, SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock) {
+ int32_t rowIndex = 0;
+
+ while (rowIndex < pBlock->info.rows) {
+ int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
+
+ bool samegroup = true;
+ if (pInfo->hasPrev) {
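+ // compare the order-by columns of this row with the previous row to
+ // detect a group boundary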
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColIndex *pIndex = taosArrayGet(pInfo->orderColumnList, i);
+ SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex);
+
+ SColumnInfo *pColInfo = &pColInfoData->info;
+
+ char *d = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
+ int32_t ret = columnValueAscendingComparator(pInfo->prevRow[i], d, pColInfo->type, pColInfo->bytes);
+ if (ret != 0) { // it is a new group
+ samegroup = false;
+ break;
+ }
+ }
}
- while(1) {
- if (*newgroup) {
- pInfo->currentGroupOffset -= 1;
- *newgroup = false;
+ if (!samegroup || !pInfo->hasPrev) {
+ pInfo->ignoreCurrentGroup = false;
+ savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, rowIndex, &pInfo->hasPrev);
+
+ pInfo->currentOffset = pInfo->limit.offset; // reset the offset value for a new group
+ pInfo->rowsTotal = 0;
+
+ if (pInfo->currentGroupOffset > 0) {
+ pInfo->ignoreCurrentGroup = true;
+ pInfo->currentGroupOffset -= 1; // now we are in the next group data
+ rowIndex += 1;
+ continue;
+ }
+
+ // A new group has arrived and the SLIMIT group quota has already been
+ // reached: mark the query as completed and return immediately.
+ if (pInfo->slimit.limit >= 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
+ setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+ pOperator->status = OP_EXEC_DONE;
+ return BLOCK_NO_GROUP;
}
- while ((*newgroup) == false) {
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
- pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
- publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+ pInfo->groupTotal += 1;
- if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
- }
+ // unless multi-group results are allowed, stop when the current row starts
+ // a new group and the result SSDataBlock already holds rows from the
+ // previous group
+ if (!pInfo->multigroupResult && !samegroup && pInfo->pRes->info.rows > 0) {
+ return BLOCK_NEW_GROUP;
}
- // now we have got the first data block of the next group.
- if (pInfo->currentGroupOffset == 0) {
- return pBlock;
+ doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
+
+ } else { // handle the offset in the same group
+ // all rows of the current group are discarded because of the SLIMIT offset in the SQL statement
+ if (pInfo->ignoreCurrentGroup) {
+ rowIndex += 1;
+ continue;
}
+
+ doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
}
- return NULL;
+ rowIndex += 1;
+ }
+
+ return BLOCK_SAME_GROUP;
}
SSDataBlock* doSLimit(void* param, bool* newgroup) {
@@ -1093,63 +1104,41 @@ SSDataBlock* doSLimit(void* param, bool* newgroup) {
}
SSLimitOperatorInfo *pInfo = pOperator->info;
+ pInfo->pRes->info.rows = 0;
- SSDataBlock *pBlock = NULL;
- while (1) {
- pBlock = skipGroupBlock(pOperator, newgroup);
- if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
- }
-
- if (*newgroup) { // a new group arrives
- pInfo->groupTotal += 1;
- pInfo->rowsTotal = 0;
- pInfo->currentOffset = pInfo->limit.offset;
- }
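+ // a block parked by the previous call starts a new group; replay it into
+ // the now-empty result block before pulling more data from upstream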
+ if (pInfo->pPrevBlock != NULL) {
+ ensureOutputBuf(pInfo, pInfo->pRes, pInfo->pPrevBlock->info.rows);
+ int32_t ret = doSlimitImpl(pOperator, pInfo, pInfo->pPrevBlock);
+ assert(ret != BLOCK_NEW_GROUP);
- assert(pInfo->currentGroupOffset == 0);
-
- if (pInfo->currentOffset >= pBlock->info.rows) {
- pInfo->currentOffset -= pBlock->info.rows;
- } else {
- if (pInfo->currentOffset == 0) {
- break;
- }
-
- int32_t remain = (int32_t)(pBlock->info.rows - pInfo->currentOffset);
- pBlock->info.rows = remain;
+ pInfo->pPrevBlock = NULL;
+ }
- // move the remain rows of this data block to the front.
- for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
- SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+ assert(pInfo->currentGroupOffset >= 0);
- int16_t bytes = pColInfoData->info.bytes;
- memmove(pColInfoData->pData, pColInfoData->pData + bytes * pInfo->currentOffset, remain * bytes);
- }
+ while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
- pInfo->currentOffset = 0;
- break;
+ if (pBlock == NULL) {
+ return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
- }
-
- if (pInfo->slimit.limit > 0 && pInfo->groupTotal > pInfo->slimit.limit) { // reach the group limit, abort
- return NULL;
- }
- if (pInfo->limit.limit > 0 && (pInfo->rowsTotal + pBlock->info.rows >= pInfo->limit.limit)) {
- pBlock->info.rows = (int32_t)(pInfo->limit.limit - pInfo->rowsTotal);
- pInfo->rowsTotal = pInfo->limit.limit;
+ ensureOutputBuf(pInfo, pInfo->pRes, pBlock->info.rows);
+ int32_t ret = doSlimitImpl(pOperator, pInfo, pBlock);
+ if (ret == BLOCK_NEW_GROUP) {
+ pInfo->pPrevBlock = pBlock;
+ return pInfo->pRes;
+ }
- if (pInfo->slimit.limit > 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
- pOperator->status = OP_EXEC_DONE;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
- // setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- } else {
- pInfo->rowsTotal += pBlock->info.rows;
+ // the result block has accumulated enough rows; hand it back to the caller
+ if (pInfo->pRes->info.rows > pInfo->threshold) {
+ return pInfo->pRes;
+ }
}
-
- return pBlock;
}
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 89e3832007f11dc0ede00e639d75875f142b12f1..1bf27e6cad1d57fdfd4b786d1cdcea981bf3333b 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -51,20 +51,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
}
}
+ // default compareStat is ROW_COMPARE_NO_NEED
if (nBoundCols == 0) { // file input
pBuilder->memRowType = SMEM_ROW_DATA;
- pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
} else {
float boundRatio = ((float)nBoundCols / (float)nCols);
if (boundRatio < KVRatioKV) {
pBuilder->memRowType = SMEM_ROW_KV;
- pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
} else if (boundRatio > KVRatioData) {
pBuilder->memRowType = SMEM_ROW_DATA;
- pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS;
}
pBuilder->compareStat = ROW_COMPARE_NEED;
@@ -76,7 +74,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
}
}
- pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
if (nRows > 0) {
@@ -86,7 +83,7 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
}
for (int i = 0; i < nRows; ++i) {
- (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen;
+ (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
(pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
}
}
@@ -460,7 +457,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
STableMeta * pTableMeta = pDataBlocks->pTableMeta;
SSchema * schema = tscGetTableSchema(pTableMeta);
SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder;
- int32_t dataLen = pBuilder->dataRowInitLen;
+ int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t kvLen = pBuilder->kvRowInitLen;
bool isParseBindParam = false;
@@ -809,13 +806,12 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
// allocate memory
size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
- size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple);
- char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc);
+ char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc);
if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
- pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc;
+ pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc;
}
memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);
@@ -1595,7 +1591,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (pSql->parseRetry < 1 && (ret == TSDB_CODE_TSC_SQL_SYNTAX_ERROR || ret == TSDB_CODE_TSC_INVALID_OPERATION)) {
tscDebug("0x%"PRIx64 " parse insert sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
- tscResetSqlCmd(pCmd, true);
+ tscResetSqlCmd(pCmd, true, pSql->self);
pSql->parseRetry++;
if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) {
@@ -1612,7 +1608,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (ret == TSDB_CODE_TSC_INVALID_OPERATION && pSql->parseRetry < 1 && sqlInfo.type == TSDB_SQL_SELECT) {
tscDebug("0x%"PRIx64 " parse query sql statement failed, code:%s, clear meta cache and retry ", pSql->self, tstrerror(ret));
- tscResetSqlCmd(pCmd, true);
+ tscResetSqlCmd(pCmd, true, pSql->self);
pSql->parseRetry++;
ret = tscValidateSqlInfo(pSql, &sqlInfo);
@@ -1697,7 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
- SInsertStatementParam* pInsertParam = &pCmd->insertParam;
+ SInsertStatementParam *pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam);
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
@@ -1726,12 +1722,6 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
goto _error;
}
- if (TSDB_CODE_SUCCESS !=
- (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams,
- pTableDataBlock->boundColumnInfo.allNullLen))) {
- goto _error;
- }
-
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0;
@@ -1787,6 +1777,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
}
_error:
+ pParentSql->res.code = code;
tfree(tokenBuf);
tfree(line);
taos_free_result(pSql);
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
index 2a16f1aad6525a25da652f643dc7b7f91debd432..e26e439492cec9c83b624c2bbb2bbc3a95de97b0 100644
--- a/src/client/src/tscParseLineProtocol.c
+++ b/src/client/src/tscParseLineProtocol.c
@@ -17,6 +17,7 @@
#include "tscLog.h"
#include "taos.h"
+#include "tscParseLine.h"
typedef struct {
char sTableName[TSDB_TABLE_NAME_LEN];
@@ -27,44 +28,19 @@ typedef struct {
uint8_t precision;
} SSmlSTableSchema;
-typedef struct {
- char* key;
- uint8_t type;
- int16_t length;
- char* value;
-
- //===================================
- uint32_t fieldSchemaIdx;
-} TAOS_SML_KV;
-
-typedef struct {
- char* stableName;
-
- char* childTableName;
- TAOS_SML_KV* tags;
- int32_t tagNum;
-
- // first kv must be timestamp
- TAOS_SML_KV* fields;
- int32_t fieldNum;
-
- //================================
- uint32_t schemaIdx;
-} TAOS_SML_DATA_POINT;
+//=================================================================================================
-typedef enum {
- SML_TIME_STAMP_NOW,
- SML_TIME_STAMP_SECONDS,
- SML_TIME_STAMP_MILLI_SECONDS,
- SML_TIME_STAMP_MICRO_SECONDS,
- SML_TIME_STAMP_NANO_SECONDS
-} SMLTimeStampType;
+static uint64_t linesSmlHandleId = 0;
-typedef struct {
+uint64_t genLinesSmlId() {
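+ // atomically draw the next handle id, skipping 0 (presumably reserved as
+ // an invalid id)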
uint64_t id;
-} SSmlLinesInfo;
-//=================================================================================================
+ do {
+ id = atomic_add_fetch_64(&linesSmlHandleId, 1);
+ } while (id == 0);
+
+ return id;
+}
int compareSmlColKv(const void* p1, const void* p2) {
TAOS_SML_KV* kv1 = (TAOS_SML_KV*)p1;
@@ -168,11 +144,46 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra
taosHashPut(hash, field.name, tagKeyLen, &fieldIdx, sizeof(fieldIdx));
}
- smlKv->fieldSchemaIdx = (uint32_t)fieldIdx;
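+ // the kv -> field-schema index mapping now lives in info->smlDataToSchema,
+ // keyed by the kv pointer, instead of a fieldSchemaIdx member on TAOS_SML_KV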
+ uintptr_t valPointer = (uintptr_t)smlKv;
+ taosHashPut(info->smlDataToSchema, &valPointer, sizeof(uintptr_t), &fieldIdx, sizeof(fieldIdx));
return 0;
}
+static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
+ SSmlLinesInfo* info) {
+ tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
+ qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
+
+ SStringBuilder sb; memset(&sb, 0, sizeof(sb));
+ char sTableName[TSDB_TABLE_NAME_LEN] = {0};
+ strtolower(sTableName, point->stableName);
+ taosStringBuilderAppendString(&sb, sTableName);
+ for (int j = 0; j < point->tagNum; ++j) {
+ taosStringBuilderAppendChar(&sb, ',');
+ TAOS_SML_KV* tagKv = point->tags + j;
+ char tagName[TSDB_COL_NAME_LEN] = {0};
+ strtolower(tagName, tagKv->key);
+ taosStringBuilderAppendString(&sb, tagName);
+ taosStringBuilderAppendChar(&sb, '=');
+ taosStringBuilderAppend(&sb, tagKv->value, tagKv->length);
+ }
+ size_t len = 0;
+ char* keyJoined = taosStringBuilderGetResult(&sb, &len);
+ MD5_CTX context;
+ MD5Init(&context);
+ MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
+ MD5Final(&context);
+ *tableNameLen = snprintf(tableName, *tableNameLen,
+ "t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
+ context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
+ context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
+ context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
+ taosStringBuilderDestroy(&sb);
+ tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName);
+ return 0;
+}
+
static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) {
int32_t code = 0;
SHashObj* sname2shema = taosHashInit(32,
@@ -203,6 +214,15 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
for (int j = 0; j < point->tagNum; ++j) {
TAOS_SML_KV* tagKv = point->tags + j;
+ if (!point->childTableName) {
+ char childTableName[TSDB_TABLE_NAME_LEN];
+ int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
+ getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
+ point->childTableName = calloc(1, tableNameLen+1);
+ strncpy(point->childTableName, childTableName, tableNameLen);
+ point->childTableName[tableNameLen] = '\0';
+ }
+
code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info);
if (code != 0) {
tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, tagKv->key);
@@ -219,7 +239,8 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
}
}
- point->schemaIdx = (uint32_t)stableIdx;
+ uintptr_t valPointer = (uintptr_t)point;
+ taosHashPut(info->smlDataToSchema, &valPointer, sizeof(uintptr_t), &stableIdx, sizeof(stableIdx));
}
size_t numStables = taosArrayGetSize(stableSchemas);
@@ -319,7 +340,22 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
buildColumnDescription(action->alterSTable.field, result+n, capacity-n, &outBytes);
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
code = taos_errno(res);
+ char* errStr = taos_errstr(res);
+ char* begin = strstr(errStr, "duplicated column names");
+ bool tscDupColNames = (begin != NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error: %s", info->id, errStr);
+ }
taos_free_result(res);
+
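+ // an "already exists" error here usually means the local schema cache is
+ // stale; RESET QUERY CACHE drops it so later statements see the
+ // server-side schema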
+ if (code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || code == TSDB_CODE_MND_TAG_ALREAY_EXIST || tscDupColNames) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ }
break;
}
case SCHEMA_ACTION_ADD_TAG: {
@@ -328,7 +364,22 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
result+n, capacity-n, &outBytes);
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
code = taos_errno(res);
+ char* errStr = taos_errstr(res);
+ char* begin = strstr(errStr, "duplicated column names");
+ bool tscDupColNames = (begin != NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_TAG_ALREAY_EXIST || code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || tscDupColNames) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ }
break;
}
case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
@@ -337,7 +388,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
capacity-n, &outBytes);
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_INVALID_COLUMN_LENGTH || code == TSDB_CODE_TSC_INVALID_COLUMN_LENGTH) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ }
break;
}
case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
@@ -346,7 +409,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
capacity-n, &outBytes);
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_INVALID_TAG_LENGTH || code == TSDB_CODE_TSC_INVALID_TAG_LENGTH) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ }
break;
}
case SCHEMA_ACTION_CREATE_STABLE: {
@@ -375,7 +450,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
outBytes = snprintf(pos, freeBytes, ")");
TAOS_RES* res = taos_query(taos, result);
code = taos_errno(res);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
+ }
taos_free_result(res);
+
+ if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ code = taos_errno(res2);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ }
break;
}
@@ -385,7 +472,7 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
free(result);
if (code != 0) {
- tscError("SML:0x%"PRIx64 "apply schema action failure. %s", info->id, tstrerror(code));
+ tscError("SML:0x%"PRIx64 " apply schema action failure. %s", info->id, tstrerror(code));
}
return code;
}
@@ -398,70 +485,12 @@ static int32_t destroySmlSTableSchema(SSmlSTableSchema* schema) {
return 0;
}
-int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
- int32_t code = 0;
-
- STscObj *pObj = (STscObj *)taos;
- if (pObj == NULL || pObj->signature != pObj) {
- terrno = TSDB_CODE_TSC_DISCONNECTED;
- return TSDB_CODE_TSC_DISCONNECTED;
- }
-
- tscDebug("SML:0x%"PRIx64" load table schema. super table name: %s", info->id, tableName);
-
- char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
- strtolower(tableNameLowerCase, tableName);
-
- char sql[256];
- snprintf(sql, 256, "describe %s", tableNameLowerCase);
- TAOS_RES* res = taos_query(taos, sql);
- code = taos_errno(res);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" describe table failure. %s", info->id, taos_errstr(res));
- taos_free_result(res);
- return code;
- }
- taos_free_result(res);
-
- SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
- if (pSql == NULL){
- tscError("failed to allocate memory, reason:%s", strerror(errno));
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- return code;
- }
- pSql->pTscObj = taos;
- pSql->signature = pSql;
- pSql->fp = NULL;
-
- SStrToken tableToken = {.z=tableNameLowerCase, .n=(uint32_t)strlen(tableNameLowerCase), .type=TK_ID};
- tGetToken(tableNameLowerCase, &tableToken.type);
- // Check if the table name available or not
- if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
- code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
- sprintf(pSql->cmd.payload, "table name is invalid");
- tscFreeSqlObj(pSql);
- return code;
- }
-
- SName sname = {0};
- if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) {
- tscFreeSqlObj(pSql);
- return code;
- }
- char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
- memset(fullTableName, 0, tListLen(fullTableName));
- tNameExtractFullName(&sname, fullTableName);
- tscFreeSqlObj(pSql);
-
+static int32_t fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
schema->tags = taosArrayInit(8, sizeof(SSchema));
schema->fields = taosArrayInit(64, sizeof(SSchema));
schema->tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
schema->fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
- size_t size = 0;
- STableMeta* tableMeta = NULL;
- taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void **)&tableMeta, &size);
-
tstrncpy(schema->sTableName, tableName, strlen(tableName)+1);
schema->precision = tableMeta->tableInfo.precision;
for (int i=0; i<tableMeta->tableInfo.numOfColumns; ++i) {
@@ -484,9 +513,93 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
size_t tagIndex = taosArrayGetSize(schema->tags) - 1;
taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex));
}
- tscDebug("SML:0x%"PRIx64 " load table meta succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
+ tscDebug("SML:0x%"PRIx64 " load table schema succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision);
- free(tableMeta); tableMeta = NULL;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTableMeta, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ int32_t retries = 0;
+ STableMeta* tableMeta = NULL;
+ while (retries++ < TSDB_MAX_REPLICA && tableMeta == NULL) {
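+ // the describe query below warms the local table-meta cache
+ // (tscTableMetaMap); retry a bounded number of times in case the cache
+ // entry is not yet visible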
+ STscObj* pObj = (STscObj*)taos;
+ if (pObj == NULL || pObj->signature != pObj) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return TSDB_CODE_TSC_DISCONNECTED;
+ }
+
+ tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName);
+
+ char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
+ strtolower(tableNameLowerCase, tableName);
+
+ char sql[256];
+ snprintf(sql, 256, "describe %s", tableNameLowerCase);
+ TAOS_RES* res = taos_query(taos, sql);
+ code = taos_errno(res);
+ if (code != 0) {
+ tscError("SML:0x%" PRIx64 " describe table failure. %s", info->id, taos_errstr(res));
+ taos_free_result(res);
+ return code;
+ }
+ taos_free_result(res);
+
+ SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
+ if (pSql == NULL) {
+ tscError("SML:0x%" PRIx64 " failed to allocate memory, reason:%s", info->id, strerror(errno));
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return code;
+ }
+ pSql->pTscObj = taos;
+ pSql->signature = pSql;
+ pSql->fp = NULL;
+
+ registerSqlObj(pSql);
+ SStrToken tableToken = {.z = tableNameLowerCase, .n = (uint32_t)strlen(tableNameLowerCase), .type = TK_ID};
+ tGetToken(tableNameLowerCase, &tableToken.type);
+ // Check if the table name available or not
+ if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
+ code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ sprintf(pSql->cmd.payload, "table name is invalid");
+ tscFreeRegisteredSqlObj(pSql);
+ return code;
+ }
+
+ SName sname = {0};
+ if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) {
+ tscFreeRegisteredSqlObj(pSql);
+ return code;
+ }
+ char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
+ memset(fullTableName, 0, tListLen(fullTableName));
+ tNameExtractFullName(&sname, fullTableName);
+ tscFreeRegisteredSqlObj(pSql);
+
+ size_t size = 0;
+ taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size);
+ }
+
+ if (tableMeta != NULL) {
+ *pTableMeta = tableMeta;
+ return TSDB_CODE_SUCCESS;
+ } else {
+ tscError("SML:0x%" PRIx64 " failed to retrieve table meta. super table name: %s", info->id, tableName);
+ return TSDB_CODE_TSC_NO_META_CACHED;
+ }
+}
+
+static int32_t loadTableSchemaFromDB(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
+ int32_t code = 0;
+ STableMeta* tableMeta = NULL;
+ code = retrieveTableMeta(taos, tableName, &tableMeta, info);
+ if (code == TSDB_CODE_SUCCESS) {
+ assert(tableMeta != NULL);
+ fillDbSchema(tableMeta, tableName, schema, info);
+ free(tableMeta);
+ tableMeta = NULL;
+ }
+
return code;
}
@@ -498,7 +611,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
SSmlSTableSchema dbSchema;
memset(&dbSchema, 0, sizeof(SSmlSTableSchema));
- code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info);
+ code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
SSchemaAction schemaAction = {0};
schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
@@ -507,7 +620,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
schemaAction.createSTable.tags = pointSchema->tags;
schemaAction.createSTable.fields = pointSchema->fields;
applySchemaAction(taos, &schemaAction, info);
- code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info);
+ code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
if (code != 0) {
tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
return code;
@@ -567,74 +680,6 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
return 0;
}
-static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
- SSmlLinesInfo* info) {
- tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
- qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
-
- SStringBuilder sb; memset(&sb, 0, sizeof(sb));
- char sTableName[TSDB_TABLE_NAME_LEN] = {0};
- strtolower(sTableName, point->stableName);
- taosStringBuilderAppendString(&sb, sTableName);
- for (int j = 0; j < point->tagNum; ++j) {
- taosStringBuilderAppendChar(&sb, ',');
- TAOS_SML_KV* tagKv = point->tags + j;
- char tagName[TSDB_COL_NAME_LEN] = {0};
- strtolower(tagName, tagKv->key);
- taosStringBuilderAppendString(&sb, tagName);
- taosStringBuilderAppendChar(&sb, '=');
- taosStringBuilderAppend(&sb, tagKv->value, tagKv->length);
- }
- size_t len = 0;
- char* keyJoined = taosStringBuilderGetResult(&sb, &len);
- MD5_CTX context;
- MD5Init(&context);
- MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
- MD5Final(&context);
- *tableNameLen = snprintf(tableName, *tableNameLen,
- "t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
- context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
- context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
- context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
- taosStringBuilderDestroy(&sb);
- tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName);
- return 0;
-}
-
-
-static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, const char* tagName, TAOS_BIND* bind, SSmlLinesInfo* info) {
- char sql[512];
- sprintf(sql, "alter table %s set tag %s=?", cTableName, tagName);
-
- int32_t code;
- TAOS_STMT* stmt = taos_stmt_init(taos);
- code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
-
- if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
- return code;
- }
-
- code = taos_stmt_bind_param(stmt, bind);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
- return code;
- }
-
- code = taos_stmt_execute(stmt);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
- return code;
- }
-
- code = taos_stmt_close(stmt);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
- return code;
- }
- return code;
-}
-
static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName,
SArray* tagsSchema, SArray* tagsBind, SSmlLinesInfo* info) {
size_t numTags = taosArrayGetSize(tagsSchema);
@@ -673,28 +718,28 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co
free(sql);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_prepare returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind));
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_bind_param returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
code = taos_stmt_execute(stmt);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_execute returns %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
code = taos_stmt_close(stmt);
if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_close return %d:%s", info->id, code, tstrerror(code));
return code;
}
return code;
@@ -726,27 +771,29 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols
tscDebug("SML:0x%"PRIx64" insert rows into child table %s. num of rows: %zu", info->id, cTableName, taosArrayGetSize(rowsBind));
int32_t code = 0;
- int32_t try = 0;
TAOS_STMT* stmt = taos_stmt_init(taos);
if (stmt == NULL) {
tfree(sql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+
code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
tfree(sql);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_prepare return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
+ bool tryAgain = false;
+ int32_t try = 0;
do {
code = taos_stmt_set_tbname(stmt, cTableName);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_set_tbname return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
@@ -755,31 +802,52 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols
TAOS_BIND* colsBinds = taosArrayGetP(rowsBind, i);
code = taos_stmt_bind_param(stmt, colsBinds);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_bind_param return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
code = taos_stmt_add_batch(stmt);
if (code != 0) {
- tfree(stmt);
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_add_batch return %d:%s", info->id, code, tstrerror(code));
+ taos_stmt_close(stmt);
return code;
}
}
code = taos_stmt_execute(stmt);
if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
+ tscError("SML:0x%"PRIx64" taos_stmt_execute return %d:%s, try:%d", info->id, code, tstrerror(code), try);
}
- } while (code == TSDB_CODE_TDB_TABLE_RECONFIGURE && try++ < TSDB_MAX_REPLICA);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
- taos_stmt_close(stmt);
- } else {
- taos_stmt_close(stmt);
- }
+ tryAgain = false;
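+ // classify retryable errors: a stale table/vgroup mapping, a table being
+ // reconfigured, or a temporarily unreachable server; retries back off
+ // exponentially via taosMsleep below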
+ if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID
+ || code == TSDB_CODE_VND_INVALID_VGROUP_ID
+ || code == TSDB_CODE_TDB_TABLE_RECONFIGURE
+ || code == TSDB_CODE_APP_NOT_READY
+ || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && try++ < TSDB_MAX_REPLICA) {
+ tryAgain = true;
+ }
+ if (code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
+ int32_t code2 = taos_errno(res2);
+ if (code2 != TSDB_CODE_SUCCESS) {
+ tscError("SML:0x%" PRIx64 " insert child table. reset query cache. error: %s", info->id, taos_errstr(res2));
+ }
+ taos_free_result(res2);
+ if (tryAgain) {
+ taosMsleep(50 * (2 << try));
+ }
+ }
+ if (code == TSDB_CODE_APP_NOT_READY || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ if (tryAgain) {
+ taosMsleep(50 * (2 << try));
+ }
+ }
+ } while (tryAgain);
+
+ taos_stmt_close(stmt);
return code;
}
@@ -787,16 +855,10 @@ static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int nu
SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) {
for (int32_t i = 0; i < numPoints; ++i) {
TAOS_SML_DATA_POINT * point = points + i;
- if (!point->childTableName) {
- char childTableName[TSDB_TABLE_NAME_LEN];
- int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
- getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
- point->childTableName = calloc(1, tableNameLen+1);
- strncpy(point->childTableName, childTableName, tableNameLen);
- point->childTableName[tableNameLen] = '\0';
- }
-
- SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
+ uintptr_t valPointer = (uintptr_t)point;
+ size_t* pSchemaIndex = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
+ assert(pSchemaIndex != NULL);
+ SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, *pSchemaIndex);
for (int j = 0; j < point->tagNum; ++j) {
TAOS_SML_KV* kv = point->tags + j;
@@ -840,16 +902,10 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i);
for (int j = 0; j < pDataPoint->tagNum; ++j) {
TAOS_SML_KV* kv = pDataPoint->tags + j;
- tagKVs[kv->fieldSchemaIdx] = kv;
- }
- }
-
- int32_t notNullTagsIndices[TSDB_MAX_TAGS] = {0};
- int32_t numNotNullTags = 0;
- for (int32_t i = 0; i < numTags; ++i) {
- if (tagKVs[i] != NULL) {
- notNullTagsIndices[numNotNullTags] = i;
- ++numNotNullTags;
+ uintptr_t valPointer = (uintptr_t)kv;
+ size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
+ assert(pFieldSchemaIdx != NULL);
+ tagKVs[*pFieldSchemaIdx] = kv;
}
}
@@ -863,7 +919,10 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
for (int j = 0; j < numTags; ++j) {
if (tagKVs[j] == NULL) continue;
TAOS_SML_KV* kv = tagKVs[j];
- TAOS_BIND* bind = taosArrayGet(tagBinds, kv->fieldSchemaIdx);
+ uintptr_t valPointer = (uintptr_t)kv;
+ size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
+ assert(pFieldSchemaIdx != NULL);
+ TAOS_BIND* bind = taosArrayGet(tagBinds, *pFieldSchemaIdx);
bind->buffer_type = kv->type;
bind->length = malloc(sizeof(uintptr_t*));
*bind->length = kv->length;
@@ -871,65 +930,8 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
bind->is_null = NULL;
}
- // select tag1,tag2,... from stable where tbname in (ctable)
- char* sql = malloc(tsMaxSQLStringLen+1);
- int freeBytes = tsMaxSQLStringLen + 1;
- snprintf(sql, freeBytes, "select tbname, ");
- for (int i = 0; i < numNotNullTags ; ++i) {
- snprintf(sql + strlen(sql), freeBytes-strlen(sql), "%s,", tagKVs[notNullTagsIndices[i]]->key);
- }
- snprintf(sql + strlen(sql) - 1, freeBytes - strlen(sql) + 1,
- " from %s where tbname in (\'%s\')", sTableName, cTableName);
- sql[strlen(sql)] = '\0';
-
- TAOS_RES* result = taos_query(taos, sql);
- free(sql);
-
- int32_t code = taos_errno(result);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" get child table %s tags failed. error string %s", info->id, cTableName, taos_errstr(result));
- goto cleanup;
- }
-
- // check tag value and set tag values if different
- TAOS_ROW row = taos_fetch_row(result);
- if (row != NULL) {
- int numFields = taos_field_count(result);
- TAOS_FIELD* fields = taos_fetch_fields(result);
- int* lengths = taos_fetch_lengths(result);
- for (int i = 1; i < numFields; ++i) {
- uint8_t dbType = fields[i].type;
- int32_t length = lengths[i];
- char* val = row[i];
-
- TAOS_SML_KV* tagKV = tagKVs[notNullTagsIndices[i-1]];
- if (tagKV->type != dbType) {
- tscError("SML:0x%"PRIx64" child table %s tag %s type mismatch. point type : %d, db type : %d",
- info->id, cTableName, tagKV->key, tagKV->type, dbType);
- return TSDB_CODE_TSC_INVALID_VALUE;
- }
-
- assert(tagKV->value);
-
- if (val == NULL || length != tagKV->length || memcmp(tagKV->value, val, length) != 0) {
- TAOS_BIND* bind = taosArrayGet(tagBinds, tagKV->fieldSchemaIdx);
- code = changeChildTableTagValue(taos, cTableName, tagKV->key, bind, info);
- if (code != 0) {
- tscError("SML:0x%"PRIx64" change child table tag failed. table name %s, tag %s", info->id, cTableName, tagKV->key);
- goto cleanup;
- }
- }
- }
- tscDebug("SML:0x%"PRIx64" successfully applied point tags. child table: %s", info->id, cTableName);
- } else {
- code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info);
- if (code != 0) {
- goto cleanup;
- }
- }
+ int32_t code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info);
-cleanup:
- taos_free_result(result);
for (int i = 0; i < taosArrayGetSize(tagBinds); ++i) {
TAOS_BIND* bind = taosArrayGet(tagBinds, i);
free(bind->length);
@@ -963,7 +965,10 @@ static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema,
}
for (int j = 0; j < point->fieldNum; ++j) {
TAOS_SML_KV* kv = point->fields + j;
- TAOS_BIND* bind = colBinds + kv->fieldSchemaIdx;
+ uintptr_t valPointer = (uintptr_t)kv;
+ size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
+ assert(pFieldSchemaIdx != NULL);
+ TAOS_BIND* bind = colBinds + *pFieldSchemaIdx;
bind->buffer_type = kv->type;
bind->length = malloc(sizeof(uintptr_t*));
*bind->length = kv->length;
@@ -1000,9 +1005,11 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t
while (pCTablePoints) {
SArray* cTablePoints = *pCTablePoints;
-
TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, 0);
- SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
+ uintptr_t valPointer = (uintptr_t)point;
+ size_t* pSchemaIndex = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
+ assert(pSchemaIndex != NULL);
+ SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, *pSchemaIndex);
tscDebug("SML:0x%"PRIx64" apply child table tags. child table: %s", info->id, point->childTableName);
code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, info);
@@ -1014,7 +1021,7 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t
tscDebug("SML:0x%"PRIx64" apply child table points. child table: %s", info->id, point->childTableName);
code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints, info);
if (code != 0) {
- tscError("Apply child table fields failed. child table %s, error %s", point->childTableName, tstrerror(code));
+ tscError("SML:0x%"PRIx64" Apply child table fields failed. child table %s, error %s", info->id, point->childTableName, tstrerror(code));
goto cleanup;
}
@@ -1034,10 +1041,11 @@ cleanup:
return code;
}
-int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) {
+int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert. number of points: %d", info->id, numPoint);
int32_t code = TSDB_CODE_SUCCESS;
+ info->smlDataToSchema = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, false);
tscDebug("SML:0x%"PRIx64" build data point schemas", info->id);
SArray* stableSchemas = taosArrayInit(32, sizeof(SSmlSTableSchema)); // SArray
@@ -1067,6 +1075,15 @@ clean_up:
taosArrayDestroy(schema->tags);
}
taosArrayDestroy(stableSchemas);
+ taosHashCleanup(info->smlDataToSchema);
+ return code;
+}
+
+int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) {
+ SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo));
+ info->id = genLinesSmlId();
+ int code = tscSmlInsert(taos, points, numPoint, info);
+ free(info);
return code;
}
@@ -1517,8 +1534,8 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char*str, SSmlLinesInfo* info)
return true;
}
//len does not include '\0' from value.
-static bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
- uint16_t len, SSmlLinesInfo* info) {
+bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info) {
if (len <= 0) {
return false;
}
@@ -1660,7 +1677,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
if (len >= 2) {
for (int i = 0; i < len - 2; ++i) {
if(!isdigit(value[i])) {
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
}
}
@@ -1695,20 +1712,20 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
break;
}
default: {
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
}
return TSDB_CODE_SUCCESS;
}
-static int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
- uint16_t len, SSmlLinesInfo* info) {
+int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
+ uint16_t len, SSmlLinesInfo* info) {
int32_t ret;
SMLTimeStampType type;
int64_t tsVal;
if (!isTimeStamp(value, len, &type)) {
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
ret = getTimeStampValue(value, len, type, &tsVal);
@@ -1757,7 +1774,7 @@ static int32_t parseSmlTimeStamp(TAOS_SML_KV **pTS, const char **index, SSmlLine
return ret;
}
-static bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
+bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
char *val = NULL;
char *cur = key;
char keyLower[TSDB_COL_NAME_LEN];
@@ -1794,7 +1811,7 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
while (*cur != '\0') {
if (len > TSDB_COL_NAME_LEN) {
tscError("SML:0x%"PRIx64" Key field cannot exceeds 65 characters", info->id);
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
//unescaped '=' identifies a tag key
if (*cur == '=' && *(cur - 1) != '\\') {
@@ -1854,7 +1871,7 @@ static bool parseSmlValue(TAOS_SML_KV *pKV, const char **index,
free(pKV->key);
pKV->key = NULL;
free(value);
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_VALUE;
}
free(value);
@@ -1883,7 +1900,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
tscError("SML:0x%"PRIx64" Measurement field cannot exceeds 193 characters", info->id);
free(pSml->stableName);
pSml->stableName = NULL;
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
//first unescaped comma or space identifies measurement
//if space detected first, meaning no tag in the input
@@ -1910,7 +1927,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
}
//Table name can only contain digits(0-9),alphebet(a-z),underscore(_)
-static int32_t isValidChildTableName(const char *pTbName, int16_t len) {
+int32_t isValidChildTableName(const char *pTbName, int16_t len) {
const char *cur = pTbName;
for (int i = 0; i < len; ++i) {
if(!isdigit(cur[i]) && !isalpha(cur[i]) && (cur[i] != '_')) {
@@ -2076,18 +2093,6 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
//=========================================================================
-static uint64_t linesSmlHandleId = 0;
-
-uint64_t genLinesSmlId() {
- uint64_t id;
-
- do {
- id = atomic_add_fetch_64(&linesSmlHandleId, 1);
- } while (id == 0);
-
- return id;
-}
-
void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) {
for (int i=0; i<point->tagNum; ++i) {
free((point->tags+i)->key);
@@ -2110,24 +2115,25 @@ int32_t tscParseLines(char* lines[], int numLines, SArray* points, SArray* faile
if (code != TSDB_CODE_SUCCESS) {
tscError("SML:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
destroySmlDataPoint(&point);
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ return code;
} else {
tscDebug("SML:0x%"PRIx64" data point line parse success. line %d", info->id, i);
}
taosArrayPush(points, &point);
}
- return 0;
+ return TSDB_CODE_SUCCESS;
}
int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
int32_t code = 0;
- SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo));
+ SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
info->id = genLinesSmlId();
if (numLines <= 0 || numLines > 65536) {
tscError("SML:0x%"PRIx64" taos_insert_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines);
+ tfree(info);
code = TSDB_CODE_TSC_APP_ERROR;
return code;
}
@@ -2135,7 +2141,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
for (int i = 0; i < numLines; ++i) {
if (lines[i] == NULL) {
tscError("SML:0x%"PRIx64" taos_insert_lines line %d is NULL", info->id, i);
- free(info);
+ tfree(info);
code = TSDB_CODE_TSC_APP_ERROR;
return code;
}
@@ -2144,7 +2150,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT));
if (lpPoints == NULL) {
tscError("SML:0x%"PRIx64" taos_insert_lines failed to allocate memory", info->id);
- free(info);
+ tfree(info);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2157,7 +2163,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
}
TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
- code = taos_sml_insert(taos, points, (int)numPoints, info);
+ code = tscSmlInsert(taos, points, (int)numPoints, info);
if (code != 0) {
tscError("SML:0x%"PRIx64" taos_sml_insert error: %s", info->id, tstrerror((code)));
}
@@ -2172,7 +2178,7 @@ cleanup:
taosArrayDestroy(lpPoints);
- free(info);
+ tfree(info);
return code;
}
diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c
new file mode 100644
index 0000000000000000000000000000000000000000..8e0322cab07ba462b7320cef02011b27b18785d5
--- /dev/null
+++ b/src/client/src/tscParseOpenTSDB.c
@@ -0,0 +1,976 @@
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cJSON.h"
+#include "hash.h"
+#include "taos.h"
+
+#include "tscUtil.h"
+#include "tsclient.h"
+#include "tscLog.h"
+
+#include "tscParseLine.h"
+
+#define OTD_MAX_FIELDS_NUM 2
+#define OTD_JSON_SUB_FIELDS_NUM 2
+#define OTD_JSON_FIELDS_NUM 4
+
+#define OTD_TIMESTAMP_COLUMN_NAME "ts"
+#define OTD_METRIC_VALUE_COLUMN_NAME "value"
+
+/* telnet style API parser */
+static uint64_t HandleId = 0;
+
+static uint64_t genUID() {
+ uint64_t id;
+
+ do {
+ id = atomic_add_fetch_64(&HandleId, 1);
+ } while (id == 0);
+
+ return id;
+}
+
+static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index, SSmlLinesInfo* info) {
+ const char *cur = *index;
+ uint16_t len = 0;
+
+ pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 so writing the terminator at stableName[len] cannot overflow
+ if (pSml->stableName == NULL){
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ if (isdigit(*cur)) {
+ tscError("OTD:0x%"PRIx64" Metric cannnot start with digit", info->id);
+ tfree(pSml->stableName);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ while (*cur != '\0') {
+ if (len > TSDB_TABLE_NAME_LEN) {
+ tscError("OTD:0x%"PRIx64" Metric cannot exceeds 193 characters", info->id);
+ tfree(pSml->stableName);
+ return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ }
+
+ if (*cur == ' ') {
+ break;
+ }
+
+ pSml->stableName[len] = *cur;
+ cur++;
+ len++;
+ }
+ if (len == 0 || *cur == '\0') {
+ tfree(pSml->stableName);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ pSml->stableName[len] = '\0';
+ *index = cur + 1;
+ tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char **index, SSmlLinesInfo* info) {
+ //Timestamp must be the first KV to parse
+ assert(*num_kvs == 0);
+
+ const char *start, *cur;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ int len = 0;
+ char key[] = OTD_TIMESTAMP_COLUMN_NAME;
+ char *value = NULL;
+
+ start = cur = *index;
+ //allocate fields for timestamp and value
+ *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV));
+
+ while(*cur != '\0') {
+ if (*cur == ' ') {
+ break;
+ }
+ cur++;
+ len++;
+ }
+
+ if (len > 0 && *cur != '\0') {
+ value = tcalloc(len + 1, 1);
+ memcpy(value, start, len);
+ } else {
+ tfree(*pTS);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ ret = convertSmlTimeStamp(*pTS, value, len, info);
+ if (ret) {
+ tfree(value);
+ tfree(*pTS);
+ return ret;
+ }
+ tfree(value);
+
+ (*pTS)->key = tcalloc(sizeof(key), 1);
+ memcpy((*pTS)->key, key, sizeof(key));
+
+ *num_kvs += 1;
+ *index = cur + 1;
+
+ return ret;
+}
+
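+// parses the metric value token into the second KV ("value"), inferring its TDengine type from the literal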
+static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const char **index, SSmlLinesInfo* info) {
+ //skip timestamp
+ TAOS_SML_KV *pVal = *pKVs + 1;
+ const char *start, *cur;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ int len = 0;
+ char key[] = OTD_METRIC_VALUE_COLUMN_NAME;
+ char *value = NULL;
+
+ start = cur = *index;
+
+ while(*cur != '\0') {
+ if (*cur == ' ') {
+ break;
+ }
+ cur++;
+ len++;
+ }
+
+ if (len > 0 && *cur != '\0') {
+ value = tcalloc(len + 1, 1);
+ memcpy(value, start, len);
+ } else {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ if (!convertSmlValueType(pVal, value, len, info)) {
+ tscError("OTD:0x%"PRIx64" Failed to convert metric value string(%s) to any type",
+ info->id, value);
+ tfree(value);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ tfree(value);
+
+ pVal->key = tcalloc(sizeof(key), 1);
+ memcpy(pVal->key, key, sizeof(key));
+ *num_kvs += 1;
+
+ *index = cur + 1;
+ return ret;
+}
+
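+// parses one tag key up to '='; rejects keys that duplicate an earlier one via the key hash table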
+static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
+ const char *cur = *index;
+ char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write
+ uint16_t len = 0;
+
+ //key field cannot start with digit
+ if (isdigit(*cur)) {
+    tscError("OTD:0x%"PRIx64" Tag key cannot start with digit", info->id);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ while (*cur != '\0') {
+ if (len > TSDB_COL_NAME_LEN) {
+      tscError("OTD:0x%"PRIx64" Tag key cannot exceed 65 characters", info->id);
+ return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
+ }
+ if (*cur == ' ') {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ if (*cur == '=') {
+ break;
+ }
+
+ key[len] = *cur;
+ cur++;
+ len++;
+ }
+ if (len == 0 || *cur == '\0') {
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+ key[len] = '\0';
+
+ if (checkDuplicateKey(key, pHash, info)) {
+ return TSDB_CODE_TSC_DUP_TAG_NAMES;
+ }
+
+ pKV->key = tcalloc(len + 1, 1);
+ memcpy(pKV->key, key, len + 1);
+ //tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
+ *index = cur + 1;
+ return TSDB_CODE_SUCCESS;
+}
+
+
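+// parses one tag value up to a space or end of line; *is_last_kv is set when the line ends here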
+static int32_t parseTelnetTagValue(TAOS_SML_KV *pKV, const char **index,
+ bool *is_last_kv, SSmlLinesInfo* info) {
+ const char *start, *cur;
+ char *value = NULL;
+ uint16_t len = 0;
+ start = cur = *index;
+
+ while (1) {
+    // a space or '\0' terminates the value
+    if (*cur == ' ' || *cur == '\0') {
+      // '\0' means this is the last key-value pair on the line
+      *is_last_kv = (*cur == '\0');
+ break;
+ }
+ cur++;
+ len++;
+ }
+
+ if (len == 0) {
+ tfree(pKV->key);
+ return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ }
+
+ value = tcalloc(len + 1, 1);
+ memcpy(value, start, len);
+ value[len] = '\0';
+ if (!convertSmlValueType(pKV, value, len, info)) {
+ tscError("OTD:0x%"PRIx64" Failed to convert sml value string(%s) to any type",
+ info->id, value);
+    //free previously allocated key field
+ tfree(pKV->key);
+ tfree(value);
+ return TSDB_CODE_TSC_INVALID_VALUE;
+ }
+ tfree(value);
+
+ *index = (*cur == '\0') ? cur : cur + 1;
+ return TSDB_CODE_SUCCESS;
+}
+
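+// parses all key=value tag pairs, growing the KV array by 1.5x when full; a tag named "ID" becomes the child table name instead of a tag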
+static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
+ const char **index, char **childTableName,
+ SHashObj *pHash, SSmlLinesInfo* info) {
+ const char *cur = *index;
+ int32_t ret = TSDB_CODE_SUCCESS;
+ TAOS_SML_KV *pkv;
+ bool is_last_kv = false;
+
+ int32_t capacity = 4;
+ *pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV));
+ pkv = *pKVs;
+
+ while (*cur != '\0') {
+ ret = parseTelnetTagKey(pkv, &cur, pHash, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse key", info->id);
+ return ret;
+ }
+ ret = parseTelnetTagValue(pkv, &cur, &is_last_kv, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse value", info->id);
+ return ret;
+ }
+ if ((strcasecmp(pkv->key, "ID") == 0) && pkv->type == TSDB_DATA_TYPE_BINARY) {
+ ret = isValidChildTableName(pkv->value, pkv->length);
+ if (ret) {
+ return ret;
+ }
+ *childTableName = malloc(pkv->length + 1);
+ memcpy(*childTableName, pkv->value, pkv->length);
+ (*childTableName)[pkv->length] = '\0';
+ tfree(pkv->key);
+ tfree(pkv->value);
+ } else {
+ *num_kvs += 1;
+ }
+
+ if (is_last_kv) {
+ break;
+ }
+
+    //reallocate additional memory for more kvs
+ if ((*num_kvs + 1) > capacity) {
+ TAOS_SML_KV *more_kvs = NULL;
+ capacity *= 3; capacity /= 2;
+ more_kvs = realloc(*pKVs, capacity * sizeof(TAOS_SML_KV));
+ if (!more_kvs) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ *pKVs = more_kvs;
+ }
+
+    //move pkv to the next TAOS_SML_KV block
+ pkv = *pKVs + *num_kvs;
+ }
+
+ return ret;
+}
+
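+// parses one complete telnet line in the order: metric, timestamp, metric value, tag pairs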
+int32_t tscParseTelnetLine(const char* line, TAOS_SML_DATA_POINT* smlData, SSmlLinesInfo* info) {
+ const char* index = line;
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ //Parse metric
+ ret = parseTelnetMetric(smlData, &index, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse metric", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse metric finished", info->id);
+
+ //Parse timestamp
+ ret = parseTelnetTimeStamp(&smlData->fields, &smlData->fieldNum, &index, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse timestamp", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse timestamp finished", info->id);
+
+ //Parse value
+ ret = parseTelnetMetricValue(&smlData->fields, &smlData->fieldNum, &index, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse metric value", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse metric value finished", info->id);
+
+ //Parse tagKVs
+ SHashObj *keyHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
+ ret = parseTelnetTagKvs(&smlData->tags, &smlData->tagNum, &index, &smlData->childTableName, keyHashTable, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse tags", info->id);
+ taosHashCleanup(keyHashTable);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse tags finished", info->id);
+ taosHashCleanup(keyHashTable);
+
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tscParseTelnetLines(char* lines[], int numLines, SArray* points, SArray* failedLines, SSmlLinesInfo* info) {
+ for (int32_t i = 0; i < numLines; ++i) {
+ TAOS_SML_DATA_POINT point = {0};
+ int32_t code = tscParseTelnetLine(lines[i], &point, info);
+ if (code != TSDB_CODE_SUCCESS) {
+ tscError("OTD:0x%"PRIx64" data point line parse failed. line %d : %s", info->id, i, lines[i]);
+ destroySmlDataPoint(&point);
+ return code;
+ } else {
+ tscDebug("OTD:0x%"PRIx64" data point line parse success. line %d", info->id, i);
+ }
+
+ taosArrayPush(points, &point);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
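+// public entry point for the OpenTSDB telnet-style API; accepts between 1 and 65536 lines.
+// Illustrative line format (values here are made up):
+//   <metric> <timestamp> <value> <tagk_1=tagv_1> ... <tagk_n=tagv_n>
+//   e.g. "sys.cpu.usage 1626006833639000000 49.4 host=web01 cpu=3"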
+int taos_insert_telnet_lines(TAOS* taos, char* lines[], int numLines) {
+ int32_t code = 0;
+
+ SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
+ info->id = genUID();
+
+ if (numLines <= 0 || numLines > 65536) {
+ tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines numLines should be between 1 and 65536. numLines: %d", info->id, numLines);
+ tfree(info);
+ code = TSDB_CODE_TSC_APP_ERROR;
+ return code;
+ }
+
+ for (int i = 0; i < numLines; ++i) {
+ if (lines[i] == NULL) {
+ tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines line %d is NULL", info->id, i);
+ tfree(info);
+ code = TSDB_CODE_TSC_APP_ERROR;
+ return code;
+ }
+ }
+
+ SArray* lpPoints = taosArrayInit(numLines, sizeof(TAOS_SML_DATA_POINT));
+ if (lpPoints == NULL) {
+ tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines failed to allocate memory", info->id);
+ tfree(info);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines begin inserting %d lines, first line: %s", info->id, numLines, lines[0]);
+ code = tscParseTelnetLines(lines, numLines, lpPoints, NULL, info);
+ size_t numPoints = taosArrayGetSize(lpPoints);
+
+ if (code != 0) {
+ goto cleanup;
+ }
+
+ TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
+ code = tscSmlInsert(taos, points, (int)numPoints, info);
+ if (code != 0) {
+ tscError("OTD:0x%"PRIx64" taos_insert_telnet_lines error: %s", info->id, tstrerror((code)));
+ }
+
+cleanup:
+ tscDebug("OTD:0x%"PRIx64" taos_insert_telnet_lines finish inserting %d lines. code: %d", info->id, numLines, code);
+ points = TARRAY_GET_START(lpPoints);
+ numPoints = taosArrayGetSize(lpPoints);
+ for (int i = 0; i < numPoints; ++i) {
+ destroySmlDataPoint(points+i);
+ }
+
+ taosArrayDestroy(lpPoints);
+
+ tfree(info);
+ return code;
+}
+
+int taos_telnet_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) {
+ SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
+ info->id = genUID();
+ int code = tscSmlInsert(taos, points, numPoint, info);
+ tfree(info);
+ return code;
+}
+
+
+/* JSON style API parser */
+int32_t parseMetricFromJSON(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlLinesInfo* info) {
+ cJSON *metric = cJSON_GetObjectItem(root, "metric");
+ if (!cJSON_IsString(metric)) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ size_t stableLen = strlen(metric->valuestring);
+ if (stableLen > TSDB_TABLE_NAME_LEN) {
+    tscError("OTD:0x%"PRIx64" Metric cannot exceed 193 characters in JSON", info->id);
+ return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
+ }
+
+ pSml->stableName = tcalloc(stableLen + 1, sizeof(char));
+ if (pSml->stableName == NULL){
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ if (isdigit(metric->valuestring[0])) {
+    tscError("OTD:0x%"PRIx64" Metric cannot start with digit in JSON", info->id);
+ tfree(pSml->stableName);
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ tstrncpy(pSml->stableName, metric->valuestring, stableLen + 1);
+
+ return TSDB_CODE_SUCCESS;
+
+}
+
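+// parses a timestamp given as a JSON object {"value": <int>, "type": "s"|"ms"|"us"|"ns"}; a value of 0 means current time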
+int32_t parseTimestampFromJSONObj(cJSON *root, int64_t *tsVal, SSmlLinesInfo* info) {
+ int32_t size = cJSON_GetArraySize(root);
+ if (size != OTD_JSON_SUB_FIELDS_NUM) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ cJSON *value = cJSON_GetObjectItem(root, "value");
+ if (!cJSON_IsNumber(value)) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ cJSON *type = cJSON_GetObjectItem(root, "type");
+ if (!cJSON_IsString(type)) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ *tsVal = value->valueint;
+ //if timestamp value is 0 use current system time
+ if (*tsVal == 0) {
+ *tsVal = taosGetTimestampNs();
+ return TSDB_CODE_SUCCESS;
+ }
+
+ size_t typeLen = strlen(type->valuestring);
+ if (typeLen == 1 && type->valuestring[0] == 's') {
+ //seconds
+ *tsVal = (int64_t)(*tsVal * 1e9);
+ } else if (typeLen == 2 && type->valuestring[1] == 's') {
+ switch (type->valuestring[0]) {
+ case 'm':
+ //milliseconds
+ *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_NANO);
+ break;
+ case 'u':
+ //microseconds
+ *tsVal = convertTimePrecision(*tsVal, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
+ break;
+      case 'n':
+        //nanoseconds, no conversion needed
+        break;
+ default:
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseTimestampFromJSON(cJSON *root, TAOS_SML_KV **pTS, int *num_kvs, SSmlLinesInfo* info) {
+ //Timestamp must be the first KV to parse
+ assert(*num_kvs == 0);
+ int64_t tsVal;
+ char key[] = OTD_TIMESTAMP_COLUMN_NAME;
+
+ cJSON *timestamp = cJSON_GetObjectItem(root, "timestamp");
+ if (cJSON_IsNumber(timestamp)) {
+ //timestamp value 0 indicates current system time
+ if (timestamp->valueint == 0) {
+ tsVal = taosGetTimestampNs();
+ } else {
+ tsVal = convertTimePrecision(timestamp->valueint, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
+ }
+ } else if (cJSON_IsObject(timestamp)) {
+ int32_t ret = parseTimestampFromJSONObj(timestamp, &tsVal, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("OTD:0x%"PRIx64" Failed to parse timestamp from JSON Obj", info->id);
+ return ret;
+ }
+ } else {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ //allocate fields for timestamp and value
+ *pTS = tcalloc(OTD_MAX_FIELDS_NUM, sizeof(TAOS_SML_KV));
+
+
+ (*pTS)->key = tcalloc(sizeof(key), 1);
+ memcpy((*pTS)->key, key, sizeof(key));
+
+ (*pTS)->type = TSDB_DATA_TYPE_TIMESTAMP;
+ (*pTS)->length = (int16_t)tDataTypes[(*pTS)->type].bytes;
+ (*pTS)->value = tcalloc((*pTS)->length, 1);
+ memcpy((*pTS)->value, &tsVal, (*pTS)->length);
+
+ *num_kvs += 1;
+ return TSDB_CODE_SUCCESS;
+
+}
+
+int32_t convertJSONBool(TAOS_SML_KV *pVal, char* typeStr, int64_t valueInt, SSmlLinesInfo* info) {
+ if (strcasecmp(typeStr, "bool") != 0) {
+ tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON Bool", info->id, typeStr);
+ return TSDB_CODE_TSC_INVALID_JSON_TYPE;
+ }
+ pVal->type = TSDB_DATA_TYPE_BOOL;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(bool *)(pVal->value) = valueInt ? true : false;
+
+ return TSDB_CODE_SUCCESS;
+}
+
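+// maps a JSON number onto the TDengine type named by typeStr (i8/i16/i32/i64/f32/f64 or their aliases), with range checks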
+int32_t convertJSONNumber(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) {
+ //tinyint
+ if (strcasecmp(typeStr, "i8") == 0 ||
+ strcasecmp(typeStr, "tinyint") == 0) {
+ if (!IS_VALID_TINYINT(value->valueint)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(tinyint)", info->id, value->valueint);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_TINYINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(int8_t *)(pVal->value) = (int8_t)(value->valueint);
+ return TSDB_CODE_SUCCESS;
+ }
+ //smallint
+ if (strcasecmp(typeStr, "i16") == 0 ||
+ strcasecmp(typeStr, "smallint") == 0) {
+ if (!IS_VALID_SMALLINT(value->valueint)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(smallint)", info->id, value->valueint);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_SMALLINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(int16_t *)(pVal->value) = (int16_t)(value->valueint);
+ return TSDB_CODE_SUCCESS;
+ }
+ //int
+ if (strcasecmp(typeStr, "i32") == 0 ||
+ strcasecmp(typeStr, "int") == 0) {
+ if (!IS_VALID_INT(value->valueint)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(int)", info->id, value->valueint);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_INT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(int32_t *)(pVal->value) = (int32_t)(value->valueint);
+ return TSDB_CODE_SUCCESS;
+ }
+ //bigint
+ if (strcasecmp(typeStr, "i64") == 0 ||
+ strcasecmp(typeStr, "bigint") == 0) {
+ if (!IS_VALID_BIGINT(value->valueint)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%"PRId64") cannot fit in type(bigint)", info->id, value->valueint);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_BIGINT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(int64_t *)(pVal->value) = (int64_t)(value->valueint);
+ return TSDB_CODE_SUCCESS;
+ }
+ //float
+ if (strcasecmp(typeStr, "f32") == 0 ||
+ strcasecmp(typeStr, "float") == 0) {
+ if (!IS_VALID_FLOAT(value->valuedouble)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(float)", info->id, value->valuedouble);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_FLOAT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(float *)(pVal->value) = (float)(value->valuedouble);
+ return TSDB_CODE_SUCCESS;
+ }
+ //double
+ if (strcasecmp(typeStr, "f64") == 0 ||
+ strcasecmp(typeStr, "double") == 0) {
+ if (!IS_VALID_DOUBLE(value->valuedouble)) {
+ tscError("OTD:0x%"PRIx64" JSON value(%f) cannot fit in type(double)", info->id, value->valuedouble);
+ return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
+ }
+ pVal->type = TSDB_DATA_TYPE_DOUBLE;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(double *)(pVal->value) = (double)(value->valuedouble);
+ return TSDB_CODE_SUCCESS;
+ }
+
+  //reaching here means the type is unsupported
+ tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON Number", info->id, typeStr);
+ return TSDB_CODE_TSC_INVALID_JSON_TYPE;
+}
+
+int32_t convertJSONString(TAOS_SML_KV *pVal, char* typeStr, cJSON *value, SSmlLinesInfo* info) {
+ if (strcasecmp(typeStr, "binary") == 0) {
+ pVal->type = TSDB_DATA_TYPE_BINARY;
+ } else if (strcasecmp(typeStr, "nchar") == 0) {
+ pVal->type = TSDB_DATA_TYPE_NCHAR;
+ } else {
+ tscError("OTD:0x%"PRIx64" invalid type(%s) for JSON String", info->id, typeStr);
+ return TSDB_CODE_TSC_INVALID_JSON_TYPE;
+ }
+ pVal->length = (int16_t)strlen(value->valuestring);
+ pVal->value = tcalloc(pVal->length + 1, 1);
+ memcpy(pVal->value, value->valuestring, pVal->length);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseValueFromJSONObj(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) {
+ int32_t ret = TSDB_CODE_SUCCESS;
+ int32_t size = cJSON_GetArraySize(root);
+
+ if (size != OTD_JSON_SUB_FIELDS_NUM) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ cJSON *value = cJSON_GetObjectItem(root, "value");
+ if (value == NULL) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ cJSON *type = cJSON_GetObjectItem(root, "type");
+ if (!cJSON_IsString(type)) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ switch (value->type) {
+ case cJSON_True:
+ case cJSON_False: {
+ ret = convertJSONBool(pVal, type->valuestring, value->valueint, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ break;
+ }
+ case cJSON_Number: {
+ ret = convertJSONNumber(pVal, type->valuestring, value, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ break;
+ }
+ case cJSON_String: {
+ ret = convertJSONString(pVal, type->valuestring, value, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ break;
+ }
+ default:
+ return TSDB_CODE_TSC_INVALID_JSON_TYPE;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
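+// converts a bare JSON value to a KV: bool -> BOOL, number -> FLOAT by default, string -> NCHAR, object -> explicitly typed value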
+int32_t parseValueFromJSON(cJSON *root, TAOS_SML_KV *pVal, SSmlLinesInfo* info) {
+ int type = root->type;
+
+ switch (type) {
+ case cJSON_True:
+ case cJSON_False: {
+ pVal->type = TSDB_DATA_TYPE_BOOL;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(bool *)(pVal->value) = root->valueint ? true : false;
+ break;
+ }
+ case cJSON_Number: {
+ //convert default JSON Number type to float
+ pVal->type = TSDB_DATA_TYPE_FLOAT;
+ pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
+ pVal->value = tcalloc(pVal->length, 1);
+ *(float *)(pVal->value) = (float)(root->valuedouble);
+ break;
+ }
+ case cJSON_String: {
+ //convert default JSON String type to nchar
+ pVal->type = TSDB_DATA_TYPE_NCHAR;
+ //pVal->length = wcslen((wchar_t *)root->valuestring) * TSDB_NCHAR_SIZE;
+ pVal->length = (int16_t)strlen(root->valuestring);
+ pVal->value = tcalloc(pVal->length + 1, 1);
+ memcpy(pVal->value, root->valuestring, pVal->length);
+ break;
+ }
+ case cJSON_Object: {
+ int32_t ret = parseValueFromJSONObj(root, pVal, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+        tscError("OTD:0x%"PRIx64" Failed to parse value from JSON Obj", info->id);
+ return ret;
+ }
+ break;
+ }
+ default:
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, SSmlLinesInfo* info) {
+ //skip timestamp
+ TAOS_SML_KV *pVal = *pKVs + 1;
+ char key[] = OTD_METRIC_VALUE_COLUMN_NAME;
+
+ cJSON *metricVal = cJSON_GetObjectItem(root, "value");
+ if (metricVal == NULL) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ int32_t ret = parseValueFromJSON(metricVal, pVal, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+
+ pVal->key = tcalloc(sizeof(key), 1);
+ memcpy(pVal->key, key, sizeof(key));
+
+ *num_kvs += 1;
+ return TSDB_CODE_SUCCESS;
+
+}
+
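+// parses the "tags" object; the first "ID" tag is taken as the child table name and all "ID" entries are dropped from the tags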
+int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, char **childTableName, SSmlLinesInfo* info) {
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ cJSON *tags = cJSON_GetObjectItem(root, "tags");
+ if (tags == NULL || tags->type != cJSON_Object) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ //only pick up the first ID value as child table name
+ cJSON *id = cJSON_GetObjectItem(tags, "ID");
+ if (id != NULL) {
+ size_t idLen = strlen(id->valuestring);
+ ret = isValidChildTableName(id->valuestring, (int16_t)idLen);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ *childTableName = tcalloc(idLen + 1, sizeof(char));
+ memcpy(*childTableName, id->valuestring, idLen);
+    //remove all ID fields from the tags list, case-insensitively
+ while (id != NULL) {
+ cJSON_DeleteItemFromObject(tags, "ID");
+ id = cJSON_GetObjectItem(tags, "ID");
+ }
+ }
+
+ int32_t tagNum = cJSON_GetArraySize(tags);
+ //at least one tag pair required
+ if (tagNum <= 0) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ //allocate memory for tags
+ *pKVs = tcalloc(tagNum, sizeof(TAOS_SML_KV));
+ TAOS_SML_KV *pkv = *pKVs;
+
+ for (int32_t i = 0; i < tagNum; ++i) {
+ cJSON *tag = cJSON_GetArrayItem(tags, i);
+ if (tag == NULL) {
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+ //key
+ size_t keyLen = strlen(tag->string);
+ pkv->key = tcalloc(keyLen + 1, sizeof(char));
+ strncpy(pkv->key, tag->string, keyLen);
+ //value
+ ret = parseValueFromJSON(tag, pkv, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
+ }
+ *num_kvs += 1;
+ pkv++;
+ }
+
+ return ret;
+
+}
+
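+// parses one JSON data point: the object must contain exactly the metric, timestamp, value and tags fields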
+int32_t tscParseJSONPayload(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlLinesInfo* info) {
+ int32_t ret = TSDB_CODE_SUCCESS;
+
+ if (!cJSON_IsObject(root)) {
+ tscError("OTD:0x%"PRIx64" data point needs to be JSON object", info->id);
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ int32_t size = cJSON_GetArraySize(root);
+  //the outermost JSON object must have exactly 4 fields
+ if (size != OTD_JSON_FIELDS_NUM) {
+ tscError("OTD:0x%"PRIx64" Invalid number of JSON fields in data point %d", info->id, size);
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ //Parse metric
+ ret = parseMetricFromJSON(root, pSml, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("OTD:0x%"PRIx64" Unable to parse metric from JSON payload", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse metric from JSON payload finished", info->id);
+
+ //Parse timestamp
+ ret = parseTimestampFromJSON(root, &pSml->fields, &pSml->fieldNum, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse timestamp from JSON payload", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse timestamp from JSON payload finished", info->id);
+
+ //Parse metric value
+ ret = parseMetricValueFromJSON(root, &pSml->fields, &pSml->fieldNum, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse metric value from JSON payload", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse metric value from JSON payload finished", info->id);
+
+ //Parse tags
+ ret = parseTagsFromJSON(root, &pSml->tags, &pSml->tagNum, &pSml->childTableName, info);
+ if (ret) {
+ tscError("OTD:0x%"PRIx64" Unable to parse tags from JSON payload", info->id);
+ return ret;
+ }
+ tscDebug("OTD:0x%"PRIx64" Parse tags from JSON payload finished", info->id);
+
+ return TSDB_CODE_SUCCESS;
+}
+
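+// parses a payload holding either a single JSON object or a JSON array of data point objects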
+int32_t tscParseMultiJSONPayload(char* payload, SArray* points, SSmlLinesInfo* info) {
+ int32_t payloadNum, ret;
+ ret = TSDB_CODE_SUCCESS;
+
+ if (payload == NULL) {
+ tscError("OTD:0x%"PRIx64" empty JSON Payload", info->id);
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+
+ cJSON *root = cJSON_Parse(payload);
+ //multiple data points must be sent in JSON array
+ if (cJSON_IsObject(root)) {
+ payloadNum = 1;
+ } else if (cJSON_IsArray(root)) {
+ payloadNum = cJSON_GetArraySize(root);
+ } else {
+ tscError("OTD:0x%"PRIx64" Invalid JSON Payload", info->id);
+ ret = TSDB_CODE_TSC_INVALID_JSON;
+ goto PARSE_JSON_OVER;
+ }
+
+ for (int32_t i = 0; i < payloadNum; ++i) {
+ TAOS_SML_DATA_POINT point = {0};
+ cJSON *dataPoint = (payloadNum == 1) ? root : cJSON_GetArrayItem(root, i);
+
+ ret = tscParseJSONPayload(dataPoint, &point, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ tscError("OTD:0x%"PRIx64" JSON data point parse failed", info->id);
+ destroySmlDataPoint(&point);
+ goto PARSE_JSON_OVER;
+ } else {
+ tscDebug("OTD:0x%"PRIx64" JSON data point parse success", info->id);
+ }
+ taosArrayPush(points, &point);
+ }
+
+PARSE_JSON_OVER:
+ cJSON_Delete(root);
+ return ret;
+}
+
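+// public entry point for the OpenTSDB JSON-style API.
+// Illustrative payload (field values are made up; exactly 4 outer fields are required):
+//   {"metric":"sys.cpu.usage","timestamp":1626006833,"value":18.0,"tags":{"host":"web01","cpu":3}}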
+int taos_insert_json_payload(TAOS* taos, char* payload) {
+ int32_t code = 0;
+
+ SSmlLinesInfo* info = tcalloc(1, sizeof(SSmlLinesInfo));
+ info->id = genUID();
+
+ if (payload == NULL) {
+ tscError("OTD:0x%"PRIx64" taos_insert_json_payload payload is NULL", info->id);
+ tfree(info);
+ code = TSDB_CODE_TSC_APP_ERROR;
+ return code;
+ }
+
+ SArray* lpPoints = taosArrayInit(1, sizeof(TAOS_SML_DATA_POINT));
+ if (lpPoints == NULL) {
+ tscError("OTD:0x%"PRIx64" taos_insert_json_payload failed to allocate memory", info->id);
+ tfree(info);
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+  tscDebug("OTD:0x%"PRIx64" taos_insert_json_payload begin inserting %d points", info->id, 1);
+ code = tscParseMultiJSONPayload(payload, lpPoints, info);
+ size_t numPoints = taosArrayGetSize(lpPoints);
+
+ if (code != 0) {
+ goto cleanup;
+ }
+
+ TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
+ code = tscSmlInsert(taos, points, (int)numPoints, info);
+ if (code != 0) {
+ tscError("OTD:0x%"PRIx64" taos_insert_json_payload error: %s", info->id, tstrerror((code)));
+ }
+
+cleanup:
+ tscDebug("OTD:0x%"PRIx64" taos_insert_json_payload finish inserting 1 Point. code: %d", info->id, code);
+ points = TARRAY_GET_START(lpPoints);
+ numPoints = taosArrayGetSize(lpPoints);
+ for (int i = 0; i < numPoints; ++i) {
+ destroySmlDataPoint(points+i);
+ }
+
+ taosArrayDestroy(lpPoints);
+
+ tfree(info);
+ return code;
+}
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index 40664241c1858f75ca1ebc6c1ec1f9cfe56358a7..d0ac0ccf4ee4bfa381a78090409a761717ceb4b0 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -206,6 +206,8 @@ static int normalStmtPrepare(STscStmt* stmt) {
return code;
}
start = i + token.n;
+ } else if (token.type == TK_ILLEGAL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "invalid sql");
}
i += token.n;
@@ -1489,7 +1491,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA;
- pSql->isBind = true;
pStmt->pSql = pSql;
pStmt->last = STMT_INIT;
@@ -1527,8 +1528,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self;
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
-
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);
@@ -1537,6 +1539,8 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->qId = 0;
pRes->numOfRows = 1;
+ registerSqlObj(pSql);
+
strtolower(pSql->sqlstr, sql);
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
@@ -1546,8 +1550,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pSql->cmd.insertParam.numOfParams = 0;
pSql->cmd.batchSize = 0;
- registerSqlObj(pSql);
-
int32_t ret = stmtParseInsertTbTags(pSql, pStmt);
if (ret != TSDB_CODE_SUCCESS) {
STMT_RET(ret);
@@ -1694,7 +1696,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) > 0) {
SHashObj* hashList = pCmd->insertParam.pTableBlockHashList;
pCmd->insertParam.pTableBlockHashList = NULL;
- tscResetSqlCmd(pCmd, false);
+ tscResetSqlCmd(pCmd, false, pSql->self);
pCmd->insertParam.pTableBlockHashList = hashList;
}
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 92ad9b792448d659b9a79528eadf2009426c4c41..b00138b4c46943933145241b3ca9e7ef47c4fcfe 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -18,11 +18,11 @@
#include "tsclient.h"
#include "tsocket.h"
#include "ttimer.h"
-#include "tutil.h"
#include "taosmsg.h"
#include "tcq.h"
#include "taos.h"
+#include "tscUtil.h"
void tscSaveSlowQueryFp(void *handle, void *tmrId);
TAOS *tscSlowQueryConn = NULL;
@@ -227,16 +227,16 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SHeartBeatMsg *pHeartbeat = pMsg;
+
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
pHeartbeat->numOfQueries = 0;
SQueryDesc *pQdesc = (SQueryDesc *)pHeartbeat->pData;
- // We extract the lock to tscBuildHeartBeatMsg function.
-
int64_t now = taosGetTimestampMs();
SSqlObj *pSql = pObj->sqlList;
+
while (pSql) {
/*
* avoid sqlobj may not be correctly removed from sql list
@@ -248,41 +248,55 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
}
tstrncpy(pQdesc->sql, pSql->sqlstr, sizeof(pQdesc->sql));
- pQdesc->stime = htobe64(pSql->stime);
- pQdesc->queryId = htonl(pSql->queryId);
- //pQdesc->useconds = htobe64(pSql->res.useconds);
+ pQdesc->stime = htobe64(pSql->stime);
+ pQdesc->queryId = htonl(pSql->queryId);
pQdesc->useconds = htobe64(now - pSql->stime);
- pQdesc->qId = htobe64(pSql->res.qId);
+ pQdesc->qId = htobe64(pSql->res.qId);
pQdesc->sqlObjId = htobe64(pSql->self);
- pQdesc->pid = pHeartbeat->pid;
- pQdesc->stableQuery = pSql->cmd.pQueryInfo->stableQuery;
+ pQdesc->pid = pHeartbeat->pid;
pQdesc->numOfSub = pSql->subState.numOfSub;
+ // todo race condition
+ pQdesc->stableQuery = 0;
+
char *p = pQdesc->subSqlInfo;
int32_t remainLen = sizeof(pQdesc->subSqlInfo);
if (pQdesc->numOfSub == 0) {
snprintf(p, remainLen, "N/A");
} else {
- int32_t len;
- for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
- len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i,
- pSql->pSubs[i]->self,
- pSql->subState.states[i] ? 'C' : 'I');
- if (len > remainLen) {
- break;
+// SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
+// if (pQueryInfo != NULL) {
+// pQdesc->stableQuery = (pQueryInfo->stableQuery)?1:0;
+// } else {
+// pQdesc->stableQuery = 0;
+// }
+
+ if (pSql->pSubs != NULL && pSql->subState.states != NULL) {
+ for (int32_t i = 0; i < pQdesc->numOfSub; ++i) {
+ SSqlObj *psub = pSql->pSubs[i];
+ int64_t self = (psub != NULL)? psub->self : 0;
+
+ int32_t len = snprintf(p, remainLen, "[%d]0x%" PRIx64 "(%c) ", i, self, pSql->subState.states[i] ? 'C' : 'I');
+ if (len > remainLen) {
+ break;
+ }
+
+ remainLen -= len;
+ p += len;
}
- remainLen -= len;
- p += len;
}
}
- pQdesc->numOfSub = htonl(pQdesc->numOfSub);
+ pQdesc->numOfSub = htonl(pQdesc->numOfSub);
taosGetFqdn(pQdesc->fqdn);
pHeartbeat->numOfQueries++;
pQdesc++;
+
pSql = pSql->next;
- if (pHeartbeat->numOfQueries >= allocedQueriesNum) break;
+ if (pHeartbeat->numOfQueries >= allocedQueriesNum) {
+ break;
+ }
}
pHeartbeat->numOfStreams = 0;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 17b693faf250f0c8c26c0cc2350d57440e1583d7..a62a8ac3efca0836faab778224aa4a831e84e580 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -22,6 +22,7 @@
#include <math.h>
#include "os.h"
+#include "regex.h"
#include "qPlan.h"
#include "qSqlparser.h"
#include "qTableMeta.h"
@@ -71,12 +72,10 @@ static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
static bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
-static char* cloneCurrentDBName(SSqlObj* pSql);
static int32_t getDelimiterIndex(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
-static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len);
static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, char* rawName, int32_t nameLength);
static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem,
@@ -116,7 +115,7 @@ static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
-static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
+static int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
@@ -279,6 +278,10 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) {
return TSDB_BINARY_OP_REMAINDER;
case TK_LIKE:
return TSDB_RELATION_LIKE;
+ case TK_MATCH:
+ return TSDB_RELATION_MATCH;
+ case TK_NMATCH:
+ return TSDB_RELATION_NMATCH;
case TK_ISNULL:
return TSDB_RELATION_ISNULL;
case TK_NOTNULL:
@@ -331,7 +334,7 @@ static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- if (convertTimestampStrToInt64(pVar, tinfo.precision) < -1) {
+ if (convertTimestampStrToInt64(pVar, tinfo.precision) < 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
return TSDB_CODE_SUCCESS;
@@ -426,13 +429,12 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
return TSDB_CODE_TSC_APP_ERROR;
}
close(fd);
- tfree(*buf);
return TSDB_CODE_SUCCESS;
}
int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
- const char *msg1 = "function name is too long";
+  const char *msg1 = "invalid function name";
const char *msg2 = "path is too long";
const char *msg3 = "invalid outputtype";
const char *msg4 = "invalid script";
@@ -449,7 +451,10 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
createInfo->name.z[createInfo->name.n] = 0;
-
+  // function names follow the same naming rules as column names
+ if (validateColumnName(createInfo->name.z) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
strdequote(createInfo->name.z);
if (strlen(createInfo->name.z) >= TSDB_FUNC_NAME_LEN) {
@@ -892,6 +897,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SELECT: {
+ const char * msg1 = "no nested query supported in union clause";
code = loadAllTableMeta(pSql, pInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -905,6 +911,10 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size);
+ if (size > 1 && pSqlNode->from && pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
+
// normalizeSqlNode(pSqlNode); // normalize the column name in each function
if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
return code;
@@ -925,7 +935,6 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pQueryInfo = pCmd->active;
pQueryInfo->pUdfInfo = pUdfInfo;
pQueryInfo->udfCopy = true;
-
}
}
@@ -1079,12 +1088,13 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
const char* msg1 = "sliding cannot be used without interval";
const char* msg2 = "interval cannot be less than 1 us";
const char* msg3 = "interval value is too small";
+ const char* msg4 = "only point interpolation query requires keyword EVERY";
SSqlCmd* pCmd = &pSql->cmd;
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
-
+
if (!TPARSER_HAS_TOKEN(pSqlNode->interval.interval)) {
if (TPARSER_HAS_TOKEN(pSqlNode->sliding)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -1110,7 +1120,6 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
-
// interval cannot be less than 10 milliseconds
if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
@@ -1125,9 +1134,15 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+ bool interpQuery = tscIsPointInterpQuery(pQueryInfo);
+ if ((pSqlNode->interval.token == TK_EVERY && (!interpQuery)) || (pSqlNode->interval.token == TK_INTERVAL && interpQuery)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+
// The following part is used to check for the invalid query expression.
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
}
+
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
const char* msg1 = "invalid column name";
@@ -1534,9 +1549,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
/*
* tags name /column name is truncated in sql.y
*/
-bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
- //const char* msg1 = "timestamp not allowed in tags";
- const char* msg2 = "duplicated column names";
+int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
const char* msg3 = "tag length too long";
const char* msg4 = "invalid tag name";
const char* msg5 = "invalid binary/nchar tag length";
@@ -1551,8 +1564,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
// no more max columns
if (numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// no more than 6 tags
@@ -1560,8 +1572,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
char msg[128] = {0};
sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS);
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
// no timestamp allowable
@@ -1571,8 +1582,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
//}
if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@@ -1584,20 +1594,17 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
// length less than TSDB_MAX_TASG_LEN
if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// tags name can not be a keyword
if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
// binary(val), val can not be equalled to or less than 0
if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
// field name must be unique
@@ -1605,17 +1612,16 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
if (strncasecmp(pTagField->name, pSchema[i].name, sizeof(pTagField->name) - 1) == 0) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return false;
+ //return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pTagField->name, NULL);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
}
}
- return true;
+ return TSDB_CODE_SUCCESS;
}
-bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
+int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
const char* msg1 = "too many columns";
- const char* msg2 = "duplicated column names";
const char* msg3 = "column length too long";
const char* msg4 = "invalid data type";
const char* msg5 = "invalid column name";
@@ -1630,18 +1636,15 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
// no more max columns
if (numOfCols >= TSDB_MAX_COLUMNS || numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_UBIGINT) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
SSchema* pSchema = tscGetTableSchema(pTableMeta);
@@ -1652,25 +1655,23 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
}
if (pColField->bytes <= 0) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// length less than TSDB_MAX_BYTES_PER_ROW
if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
- return false;
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// field name must be unique
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- return false;
+ //return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
}
}
- return true;
+ return TSDB_CODE_SUCCESS;
}
/* is contained in pFieldList or not */
@@ -1686,14 +1687,6 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
-static char* cloneCurrentDBName(SSqlObj* pSql) {
- pthread_mutex_lock(&pSql->pTscObj->mutex);
- char *p = strdup(pSql->pTscObj->db);
- pthread_mutex_unlock(&pSql->pTscObj->mutex);
-
- return p;
-}
-
/* length limitation, strstr cannot be applied */
static int32_t getDelimiterIndex(SStrToken* pTableName) {
for (uint32_t i = 0; i < pTableName->n; ++i) {
@@ -1704,57 +1697,6 @@ static int32_t getDelimiterIndex(SStrToken* pTableName) {
return -1;
}
-int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* xlen) {
- int32_t totalLen = 0;
-
- if (account != NULL) {
- int32_t len = (int32_t)strlen(account);
- strcpy(fullName, account);
- fullName[len] = TS_PATH_DELIMITER[0];
- totalLen += (len + 1);
- }
-
- /* db name is not specified, the tableName dose not include db name */
- if (pDB != NULL) {
- if (pDB->n >= TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN || pDB->n == 0) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
-
- memcpy(&fullName[totalLen], pDB->z, pDB->n);
- totalLen += pDB->n;
- }
-
- if (tableName != NULL) {
- if (pDB != NULL) {
- fullName[totalLen] = TS_PATH_DELIMITER[0];
- totalLen += 1;
-
- /* here we only check the table name length limitation */
- if (!tscValidateTableNameLength(tableName->n)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
- } else { // pDB == NULL, the db prefix name is specified in tableName
- /* the length limitation includes tablename + dbname + sep */
- if (tableName->n >= TSDB_TABLE_NAME_LEN + TSDB_DB_NAME_LEN) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
- }
-
- memcpy(&fullName[totalLen], tableName->z, tableName->n);
- totalLen += tableName->n;
- }
-
- if (xlen != NULL) {
- *xlen = totalLen;
- }
-
- if (totalLen < TSDB_TABLE_FNAME_LEN) {
- fullName[totalLen] = 0;
- }
-
- return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_OPERATION;
-}
-
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t tableUid) {
SSchema s = {.type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscColumnListInsert(pQueryInfo->colList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s);
@@ -1944,20 +1886,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
-bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
- if (pQueryInfo == NULL) {
- return false;
- }
- if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
- && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
- return false;
- }
- if (tscNumOfExprs(pQueryInfo) == 1){
- return true;
- }
- return false;
-}
-
static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) {
size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < numOfExprs; ++i) {
@@ -2047,8 +1975,12 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
const char* msg1 = "too many items in selection clause";
const char* msg2 = "functions or others can not be mixed up";
const char* msg3 = "not support query expression";
- const char* msg4 = "only support distinct one column or tag";
+  const char* msg4 = "distinct cannot be mixed with projection/aggregation functions";
const char* msg5 = "invalid function name";
+  const char* msg6 = "distinct cannot be mixed with join";
+  const char* msg7 = "distinct cannot be mixed with group by";
+  const char* msg8 = "distinct is not supported in nested query";
+  const char* msg9 = "_block_dist does not support subquery, only stable/table";
// too many result columns not support order by in query
if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) {
@@ -2059,19 +1991,30 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
+
bool hasDistinct = false;
+ bool hasAgg = false;
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
+ int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i);
-
if (hasDistinct == false) {
- hasDistinct = (pItem->distinct == true);
- }
+ hasDistinct = (pItem->distinct == true);
+ distIdx = hasDistinct ? i : -1;
+ }
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
+ hasAgg = true;
+ if (hasDistinct) break;
+
pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
+
+ if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
+ }
+
SUdfInfo* pUdfInfo = NULL;
if (pItem->pNode->functionId < 0) {
pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
@@ -2106,10 +2049,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
}
+ //TODO(dengyihao), refactor as function
+ //handle distinct func mixed with other func
if (hasDistinct == true) {
- if (!isValidDistinctSql(pQueryInfo) ) {
+ if (distIdx != 0 || hasAgg) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ }
+ if (joinQuery) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
+ if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
+ if (pQueryInfo->pDownstream != NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
+
pQueryInfo->distinct = true;
}
@@ -2544,6 +2499,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_MAX:
case TSDB_FUNC_DIFF:
case TSDB_FUNC_DERIVATIVE:
+ case TSDB_FUNC_CEIL:
+ case TSDB_FUNC_FLOOR:
+ case TSDB_FUNC_ROUND:
case TSDB_FUNC_STDDEV:
case TSDB_FUNC_LEASTSQR: {
// 1. valid the number of parameters
@@ -2594,13 +2552,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for diff query
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
- colIndex += 1;
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
SColumnList ids = createColumnList(1, 0, 0);
- insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
@@ -2634,7 +2591,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
- }
+ }
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
@@ -2668,8 +2625,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
-
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
+
return TSDB_CODE_SUCCESS;
}
@@ -2735,6 +2692,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
+ if (pParamElem->pNode->columnName.z == NULL) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
// functions can not be applied to tags
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
@@ -2873,7 +2834,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
- insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
+ insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts
@@ -3051,7 +3012,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
}
}
-
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
return TSDB_CODE_SUCCESS;
}
@@ -3469,6 +3429,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
int32_t scalarUdf = 0;
int32_t prjNum = 0;
int32_t aggNum = 0;
+ int32_t scalNum = 0;
size_t numOfExpr = tscNumOfExprs(pQueryInfo);
assert(numOfExpr > 0);
@@ -3500,6 +3461,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
++prjNum;
}
+ if (functionId == TSDB_FUNC_CEIL || functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND) {
+ ++scalNum;
+ }
+
if (functionId == TSDB_FUNC_PRJ && (pExpr1->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX || TSDB_COL_IS_UD_COL(pExpr1->base.colInfo.flag))) {
continue;
}
@@ -3521,15 +3486,19 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool
}
}
- aggNum = (int32_t)size - prjNum - aggUdf - scalarUdf;
+ aggNum = (int32_t)size - prjNum - scalNum - aggUdf - scalarUdf;
assert(aggNum >= 0);
- if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalarUdf > 0)) {
+ if (aggUdf > 0 && (prjNum > 0 || aggNum > 0 || scalNum > 0 || scalarUdf > 0)) {
+ return false;
+ }
+
+ if (scalarUdf > 0 && (aggNum > 0 || scalNum > 0)) {
return false;
}
- if (scalarUdf > 0 && aggNum > 0) {
+ if (aggNum > 0 && scalNum > 0) {
return false;
}
@@ -3716,16 +3685,13 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
}
}
- if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) {
- retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false);
-
- // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
- } else if (pExpr->tokenId == TK_IN) {
+ if (pExpr->tokenId == TK_IN) {
tVariant *pVal;
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->Expr.paramList, &pVal, colType, timePrecision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
- pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
+
+ pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
memcpy((char *)(pColumnFilter->pz), (char *)(pVal->pz), pVal->nLen);
@@ -3745,6 +3711,10 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
size_t len = twcslen((wchar_t*)pColumnFilter->pz);
pColumnFilter->len = len * TSDB_NCHAR_SIZE;
+ } else if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) {
+ retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false);
+
+ // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
} else {
retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->lowerBndd, colType, false);
}
@@ -3775,6 +3745,12 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
case TK_LIKE:
pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
break;
+ case TK_MATCH:
+ pColumnFilter->lowerRelOptr = TSDB_RELATION_MATCH;
+ break;
+ case TK_NMATCH:
+ pColumnFilter->lowerRelOptr = TSDB_RELATION_NMATCH;
+ break;
case TK_ISNULL:
pColumnFilter->lowerRelOptr = TSDB_RELATION_ISNULL;
break;
@@ -3797,9 +3773,6 @@ typedef struct SCondExpr {
tSqlExpr* pColumnCond;
- tSqlExpr* pTableCond;
- int16_t relType; // relation between table name in expression and other tag
- // filter condition expression, TK_AND or TK_OR
int16_t tableCondIndex;
tSqlExpr* pJoinExpr; // join condition
@@ -3808,43 +3781,6 @@ typedef struct SCondExpr {
static int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t timePrecision);
-static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) {
- SArray* pList = pExpr->Expr.paramList;
-
- int32_t size = (int32_t) taosArrayGetSize(pList);
- if (size <= 0) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
-
- if (size > 0) {
- taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
- }
-
- for (int32_t i = 0; i < size; ++i) {
- tSqlExprItem* pSub = taosArrayGet(pList, i);
- tVariant* pVar = &pSub->pNode->value;
-
- taosStringBuilderAppendStringLen(sb, pVar->pz, pVar->nLen);
-
- if (i < size - 1) {
- taosStringBuilderAppendString(sb, TBNAME_LIST_SEP);
- }
-
- if (pVar->nLen <= 0 || !tscValidateTableNameLength(pVar->nLen)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tablenameCondToString(tSqlExpr* pExpr, SStringBuilder* sb) {
- taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN);
- taosStringBuilderAppendString(sb, pExpr->value.pz);
-
- return TSDB_CODE_SUCCESS;
-}
-
enum {
TSQL_EXPR_TS = 1,
TSQL_EXPR_TAG = 2,
@@ -3861,8 +3797,7 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);
int32_t ret = 0;
- const char* msg1 = "non binary column not support like operator";
- const char* msg2 = "binary column not support this operator";
+  const char* msg1 = "non-binary column does not support like/match operator";
const char* msg3 = "bool column not support this operator";
const char* msg4 = "primary key not support this operator";
@@ -3883,18 +3818,8 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol
pColFilter->filterstr =
((pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) ? 1 : 0);
- if (pColFilter->filterstr) {
- if (pExpr->tokenId != TK_EQ
- && pExpr->tokenId != TK_NE
- && pExpr->tokenId != TK_ISNULL
- && pExpr->tokenId != TK_NOTNULL
- && pExpr->tokenId != TK_LIKE
- && pExpr->tokenId != TK_IN) {
- ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
- goto _err_ret;
- }
- } else {
- if (pExpr->tokenId == TK_LIKE) {
+ if (!pColFilter->filterstr) {
+ if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) {
ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
goto _err_ret;
}
@@ -3923,40 +3848,6 @@ _err_ret:
return ret;
}
-static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) {
- const char* msg0 = "invalid table name list";
- const char* msg1 = "not string following like";
-
- if (pTableCond == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
- tSqlExpr* pLeft = pTableCond->pLeft;
- tSqlExpr* pRight = pTableCond->pRight;
-
- if (!isTablenameToken(&pLeft->columnName)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
-
- int32_t ret = TSDB_CODE_SUCCESS;
-
- if (pTableCond->tokenId == TK_IN) {
- ret = tablenameListToString(pRight, sb);
- } else if (pTableCond->tokenId == TK_LIKE) {
- if (pRight->tokenId != TK_STRING) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
- }
-
- ret = tablenameCondToString(pRight, sb);
- }
-
- if (ret != TSDB_CODE_SUCCESS) {
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
- }
-
- return ret;
-}
-
static int32_t getColQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr) {
int32_t ret = TSDB_CODE_SUCCESS;
@@ -4079,8 +3970,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema1->colId, pTableMetaInfo->pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema1);
+ atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
- if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
+ if (pTableMetaInfo->joinTagNum > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -4112,7 +4004,9 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
if (tscColumnExists(pTableMetaInfo->tagColList, pTagSchema2->colId, pTableMeta->id.uid) < 0) {
tscColumnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->id.uid, pTagSchema2);
- if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) {
+ atomic_add_fetch_32(&pTableMetaInfo->joinTagNum, 1);
+
+ if (pTableMetaInfo->joinTagNum > 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -4395,18 +4289,6 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr
return true;
}
-static bool validTableNameOptr(tSqlExpr* pExpr) {
- const char nameFilterOptr[] = {TK_IN, TK_LIKE};
-
- for (int32_t i = 0; i < tListLen(nameFilterOptr); ++i) {
- if (pExpr->tokenId == nameFilterOptr[i]) {
- return true;
- }
- }
-
- return false;
-}
-
static int32_t setExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, const char* msg, int32_t parentOptr, char* msgBuf) {
if (*parent != NULL) {
if (parentOptr == TK_OR && msg != NULL) {
@@ -4488,6 +4370,58 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
return TSDB_CODE_SUCCESS;
}
+// check for match expression
+static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
+ const char* msg1 = "regular expression string should be less than %d characters";
+ const char* msg2 = "illegal column type for match/nmatch";
+ const char* msg3 = "invalid regular expression";
+
+ tSqlExpr* pLeft = pExpr->pLeft;
+ tSqlExpr* pRight = pExpr->pRight;
+
+ if (pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) {
+ if (pRight->value.nLen > tsMaxRegexStringLen) {
+ char tmp[128] = {0};
+ snprintf(tmp, sizeof(tmp), msg1, tsMaxRegexStringLen);
+ return invalidOperationMsg(msgBuf, tmp);
+ }
+
+ SSchema* pSchema = tscGetTableSchema(pTableMeta);
+ if ((!isTablenameToken(&pLeft->columnName)) && (pSchema[index].type != TSDB_DATA_TYPE_BINARY)) {
+ return invalidOperationMsg(msgBuf, msg2);
+ }
+
+ if (!(pRight->type == SQL_NODE_VALUE && pRight->value.nType == TSDB_DATA_TYPE_BINARY)) {
+ return invalidOperationMsg(msgBuf, msg3);
+ }
+
+ int errCode = 0;
+ regex_t regex;
+ char regErrBuf[256] = {0};
+
+ // remove the quotes at the beginning and end of the original SQL string.
+ uint32_t lenPattern = pRight->exprToken.n - 2;
+ char* pattern = malloc(lenPattern + 1);
+ strncpy(pattern, pRight->exprToken.z+1, lenPattern);
+ pattern[lenPattern] = '\0';
+
+ tfree(pRight->value.pz);
+ pRight->value.pz = pattern;
+ pRight->value.nLen = lenPattern;
+
+ int cflags = REG_EXTENDED;
+ if ((errCode = regcomp(&regex, pattern, cflags)) != 0) {
+ regerror(errCode, &regex, regErrBuf, sizeof(regErrBuf));
+ tscError("Failed to compile regex pattern %s. reason %s", pattern, regErrBuf);
+ return invalidOperationMsg(msgBuf, msg3);
+ }
+ regfree(&regex);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t handleNeOptr(tSqlExpr** rexpr, tSqlExpr* expr) {
tSqlExpr* left = tSqlExprClone(expr);
tSqlExpr* right = expr;
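
The new validateMatchExpr compiles the MATCH/NMATCH pattern on the client with POSIX extended regular expressions, so a malformed pattern fails fast at parse time instead of on the server. A self-contained sketch of the same fail-fast check (regcomp/regerror/regfree are the standard POSIX calls used in the hunk above):

#include <regex.h>
#include <stdio.h>

static int checkPattern(const char *pattern) {
    regex_t regex;
    char errbuf[256] = {0};
    int err = regcomp(&regex, pattern, REG_EXTENDED);
    if (err != 0) {
        /* translate the numeric error into a readable reason */
        regerror(err, &regex, errbuf, sizeof(errbuf));
        fprintf(stderr, "invalid regular expression '%s': %s\n", pattern, errbuf);
        return -1;
    }
    regfree(&regex);
    return 0;
}

int main(void) {
    checkPattern("^tb[0-9]+$"); /* compiles fine                 */
    checkPattern("tb[0-9");     /* fails: unbalanced bracket     */
    return 0;
}
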
@@ -4507,8 +4441,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
const char* msg2 = "illegal column name";
const char* msg4 = "too many join tables";
const char* msg5 = "not support ordinary column join";
- const char* msg6 = "only one query condition on tbname allowed";
- const char* msg7 = "only in/like allowed in filter table name";
tSqlExpr* pLeft = (*pExpr)->pLeft;
tSqlExpr* pRight = (*pExpr)->pRight;
@@ -4539,6 +4471,12 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return code;
}
+ // validate the match expression
+ code = validateMatchExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd));
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
@@ -4619,54 +4557,30 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- // in case of in operator, keep it in a seprate attribute
- if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- if (!validTableNameOptr(*pExpr)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
- }
-
- if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
}
- if (pCondExpr->pTableCond == NULL) {
- pCondExpr->pTableCond = *pExpr;
- pCondExpr->relType = parentOptr;
- pCondExpr->tableCondIndex = index.tableIndex;
- } else {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
+ ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
+ *pExpr = NULL;
+ if (type) {
+ *type |= TSQL_EXPR_JOIN;
+ }
+ } else {
+ // do nothing
+ // ret = setExprToCond(pCmd, &pCondExpr->pTagCond,
+ // *pExpr, NULL, parentOptr);
+ tSqlExpr *rexpr = NULL;
+ if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY && pSchema->type != TSDB_DATA_TYPE_NCHAR && pSchema->type != TSDB_DATA_TYPE_BOOL)) {
+ handleNeOptr(&rexpr, *pExpr);
+ *pExpr = rexpr;
}
-
+
if (type) {
*type |= TSQL_EXPR_TAG;
}
- *pExpr = NULL;
- } else {
- if (pRight != NULL && pRight->tokenId == TK_ID) { // join on tag columns for stable query
- if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
- }
-
- pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
- ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
- *pExpr = NULL;
- if (type) {
- *type |= TSQL_EXPR_JOIN;
- }
- } else {
- // do nothing
- // ret = setExprToCond(pCmd, &pCondExpr->pTagCond,
- // *pExpr, NULL, parentOptr);
- tSqlExpr *rexpr = NULL;
- if ((*pExpr)->tokenId == TK_NE && (pSchema->type != TSDB_DATA_TYPE_BINARY && pSchema->type != TSDB_DATA_TYPE_NCHAR && pSchema->type != TSDB_DATA_TYPE_BOOL)) {
- handleNeOptr(&rexpr, *pExpr);
- *pExpr = rexpr;
- }
-
- if (type) {
- *type |= TSQL_EXPR_TAG;
- }
- }
}
} else { // query on other columns
if (type) {
@@ -4853,80 +4767,6 @@ int tableNameCompar(const void* lhs, const void* rhs) {
return ret > 0 ? 1 : -1;
}
-static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, const char* account,
- tSqlExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) {
- const char* msg = "table name too long";
-
- if (pExpr == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
- STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableCondIndex);
-
- STagCond* pTagCond = &pQueryInfo->tagCond;
- pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->id.uid;
-
- assert(pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_IN);
-
- if (pExpr->tokenId == TK_LIKE) {
- char* str = taosStringBuilderGetResult(sb, NULL);
- pQueryInfo->tagCond.tbnameCond.cond = strdup(str);
- pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str);
- return TSDB_CODE_SUCCESS;
- }
-
- SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1));
- taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
-
- // remove the duplicated input table names
- int32_t num = 0;
- char* tableNameString = taosStringBuilderGetResult(sb, NULL);
-
- char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num);
- qsort(segments, num, POINTER_BYTES, tableNameCompar);
-
- int32_t j = 1;
- for (int32_t i = 1; i < num; ++i) {
- if (strcmp(segments[i], segments[i - 1]) != 0) {
- segments[j++] = segments[i];
- }
- }
- num = j;
-
- char name[TSDB_DB_NAME_LEN] = {0};
- tNameGetDbName(&pTableMetaInfo->name, name);
- SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
-
- for (int32_t i = 0; i < num; ++i) {
- if (i >= 1) {
- taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1);
- }
-
- char idBuf[TSDB_TABLE_FNAME_LEN] = {0};
- int32_t xlen = (int32_t)strlen(segments[i]);
- SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
-
- int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen);
- if (ret != TSDB_CODE_SUCCESS) {
- taosStringBuilderDestroy(&sb1);
- tfree(segments);
-
- invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
- return ret;
- }
-
- taosStringBuilderAppendString(&sb1, idBuf);
- }
-
- char* str = taosStringBuilderGetResult(&sb1, NULL);
- pQueryInfo->tagCond.tbnameCond.cond = strdup(str);
- pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str);
-
- taosStringBuilderDestroy(&sb1);
- tfree(segments);
- return TSDB_CODE_SUCCESS;
-}
-
int32_t mergeTimeRange(SSqlCmd* pCmd, STimeWindow* res, STimeWindow* win, int32_t optr) {
const char* msg0 = "only one time stamp window allowed";
@@ -5066,14 +4906,6 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
}
static void cleanQueryExpr(SCondExpr* pCondExpr) {
- if (pCondExpr->pTableCond) {
- tSqlExprDestroy(pCondExpr->pTableCond);
- }
-
- if (pCondExpr->pTagCond) {
- tSqlExprDestroy(pCondExpr->pTagCond);
- }
-
if (pCondExpr->pColumnCond) {
tSqlExprDestroy(pCondExpr->pColumnCond);
}
@@ -5369,7 +5201,7 @@ static int32_t getQueryTimeRange(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr
//multiple tables's query time range mixed together
tExprNode* p = NULL;
- SFilterInfo *filter = NULL;
+ void *filter = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
ret = exprTreeFromSqlExpr(pCmd, &p, *pExpr, pQueryInfo, colList, NULL);
@@ -5411,7 +5243,6 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
int32_t ret = TSDB_CODE_SUCCESS;
// tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space
- SStringBuilder sb; memset(&sb, 0, sizeof(sb));
SCondExpr condExpr = {0};
if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) {
@@ -5444,12 +5275,12 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
condExpr.pTagCond = (*pExpr);
*pExpr = NULL;
- // 1. check if it is a join query
+ // check if it is a join query
if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
- // 2. get the query time range
+ // get the query time range
if ((ret = convertTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
@@ -5457,19 +5288,13 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
if ((ret = getQueryTimeRange(&pSql->cmd, pQueryInfo, &condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
-
- // 3. get the tag query condition
+ // get the tag query condition
if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
- // 4. get the table name query condition
- if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
- goto PARSE_WHERE_EXIT;
- }
-
- // 5. other column query condition
+ // other column query condition
if ((ret = checkColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
@@ -5478,21 +5303,11 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
goto PARSE_WHERE_EXIT;
}
-
- // 6. join condition
+ // join condition
if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
goto PARSE_WHERE_EXIT;
}
- // 7. query condition for table name
- pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR;
-
- ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
- taosStringBuilderDestroy(&sb);
- if (ret) {
- goto PARSE_WHERE_EXIT;
- }
-
//if (!validateFilterExpr(pQueryInfo)) {
// ret = invalidOperationMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
// goto PARSE_WHERE_EXIT;
@@ -5563,6 +5378,10 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
pRight->flags &= ~(1 << EXPR_FLAG_NS_TIMESTAMP);
}
+ if (pRight->value.nType == -1) {
+ return TSDB_CODE_TSC_INVALID_OPERATION;
+ }
+
tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);
}
@@ -5630,6 +5449,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg3 = "top/bottom not support fill";
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
+ const char* msg6 = "not supported function now";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
@@ -5668,6 +5488,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
}
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV;
+ if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@@ -5746,14 +5569,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
if (isTopBottomQuery(pQueryInfo)) {
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
- } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column
- pQueryInfo->order.orderColId = INT32_MIN;
+ } else { // in case of select tbname from super_table, the default order column can not be the primary ts column
+ pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro
}
/* for super table query, set default ascending order for group output */
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
}
+
+ if (pQueryInfo->distinct) {
+ pQueryInfo->order.order = TSDB_ORDER_ASC;
+ pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
+ }
}
int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) {
@@ -5761,26 +5589,21 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg1 = "invalid column name in orderby clause";
const char* msg2 = "too many order by columns";
const char* msg3 = "only primary timestamp/tbname/first tag in groupby clause allowed";
- const char* msg4 = "only tag in groupby clause allowed in order by";
- const char* msg5 = "only primary timestamp/column in top/bottom function allowed as orderby column";
- const char* msg6 = "only primary timestamp allowed as the second orderby column";
- const char* msg7 = "only primary timestamp/column in groupby clause allowed as orderby column";
- const char* msg8 = "only column in groupby clause allowed as orderby column";
+ const char* msg4 = "only tag in groupby clause allowed in order clause";
+ const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column";
+ const char* msg6 = "only primary timestamp allowed as the second order column";
+ const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
+ const char* msg8 = "only column in groupby clause allowed as order column";
+ const char* msg9 = "orderby column must projected in subquery";
+ const char* msg10 = "not support distinct mixed with order by";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
-
- if (pQueryInfo->distinct == true) {
- pQueryInfo->order.order = TSDB_ORDER_ASC;
- pQueryInfo->order.orderColId = 0;
- return TSDB_CODE_SUCCESS;
- }
if (pSqlNode->pSortOrder == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
- SArray* pSortorder = pSqlNode->pSortOrder;
+ return TSDB_CODE_SUCCESS;
+ }
+ char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
+ SArray* pSortOrder = pSqlNode->pSortOrder;
/*
* for table query, there is only one or none order option is allowed, which is the
@@ -5788,19 +5611,22 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
*
* for super table query, the order option must be less than 3.
*/
- size_t size = taosArrayGetSize(pSortorder);
- if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
+ size_t size = taosArrayGetSize(pSortOrder);
+ if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) {
if (size > 1) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
+ return invalidOperationMsg(pMsgBuf, msg0);
}
} else {
if (size > 2) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ return invalidOperationMsg(pMsgBuf, msg2);
}
}
+ if (size > 0 && pQueryInfo->distinct) {
+ return invalidOperationMsg(pMsgBuf, msg10);
+ }
// handle the first part of order by
- tVariant* pVar = taosArrayGet(pSortorder, 0);
+ tVariant* pVar = taosArrayGet(pSortOrder, 0);
// e.g., order by 1 asc, return directly with out further check.
if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) {
@@ -5812,7 +5638,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(pMsgBuf, msg1);
}
bool orderByTags = false;
@@ -5824,7 +5650,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
+ return invalidOperationMsg(pMsgBuf, msg4);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@@ -5845,13 +5671,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
orderByGroupbyCol = true;
}
}
+
if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
+ return invalidOperationMsg(pMsgBuf, msg3);
} else { // order by top/bottom result value column is not supported in case of interval query.
assert(!(orderByTags && orderByTS && orderByGroupbyCol));
}
- size_t s = taosArrayGetSize(pSortorder);
+ size_t s = taosArrayGetSize(pSortOrder);
if (s == 1) {
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@@ -5865,12 +5692,16 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) {
/* order of top/bottom query in interval is not valid */
- SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+
+ int32_t pos = tscExprTopBottomIndex(pQueryInfo);
+ assert(pos > 0);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
- pExpr = tscExprGet(pQueryInfo, 1);
+ pExpr = tscExprGet(pQueryInfo, pos);
+
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(pMsgBuf, msg5);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
@@ -5885,12 +5716,21 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
// orderby ts query on super table
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
- addPrimaryTsColIntoResult(pQueryInfo, pCmd);
+ bool found = false;
+ for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ found = true;
+ break;
+ }
+ }
+ if (!found && pQueryInfo->pDownstream) {
+ return invalidOperationMsg(pMsgBuf, msg9);
+ }
+ addPrimaryTsColIntoResult(pQueryInfo, pCmd);
}
}
- }
-
- if (s == 2) {
+ } else {
tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
if (orderByTags) {
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
@@ -5907,22 +5747,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
tVariant* pVar2 = &pItem->pVar;
SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ return invalidOperationMsg(pMsgBuf, msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ return invalidOperationMsg(pMsgBuf, msg6);
} else {
- tVariantListItem* p1 = taosArrayGet(pSortorder, 1);
+ tVariantListItem* p1 = taosArrayGet(pSortOrder, 1);
pQueryInfo->order.order = p1->sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
- } else { // meter query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
}
+
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
@@ -5930,34 +5771,35 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
validOrder = (pColIndex->colIndex == index.columnIndex);
}
+
if (!validOrder) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ return invalidOperationMsg(pMsgBuf, msg7);
}
+
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
-
}
if (isTopBottomQuery(pQueryInfo)) {
- bool validOrder = false;
SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo;
if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) {
SColIndex* pColIndex = taosArrayGet(columnInfo, 0);
- validOrder = (pColIndex->colIndex == index.columnIndex);
- if (!validOrder) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
+
+ if (pColIndex->colIndex == index.columnIndex) {
+ return invalidOperationMsg(pMsgBuf, msg8);
}
} else {
- /* order of top/bottom query in interval is not valid */
- SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
+ int32_t pos = tscExprTopBottomIndex(pQueryInfo);
+ assert(pos > 0);
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
- pExpr = tscExprGet(pQueryInfo, 1);
+ pExpr = tscExprGet(pQueryInfo, pos);
+
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
+ return invalidOperationMsg(pMsgBuf, msg5);
}
- validOrder = true;
}
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
@@ -5967,6 +5809,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return TSDB_CODE_SUCCESS;
}
+ tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
+ pQueryInfo->order.order = pItem->sortOrder;
+ pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
+ } else {
+ // handle the order by clause of the temp table created by an inner subquery.
+ // Any single column of the temp table may serve as the order by column.
+ assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1);
+
+ if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) {
+ return invalidOperationMsg(pMsgBuf, msg1);
+ }
+
tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->order.order = pItem->sortOrder;
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
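
With this change the result set of an inner subquery (a temp table) can be ordered by any single projected column. A hedged usage sketch against the C connector; the table and column names (meters, voltage, avg_v) are made up for illustration and are not part of this patch:

/* illustrative only: assumes a local TDengine server and a "test" database */
#include <stdio.h>
#include <taos.h>

int main(void) {
    TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
    if (conn == NULL) return 1;

    /* order by a projected column of the inner subquery (temp table) */
    TAOS_RES *res = taos_query(conn,
        "select avg_v from (select avg(voltage) as avg_v from meters group by tbname) order by avg_v;");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    }
    taos_free_result(res);
    taos_close(conn);
    return 0;
}
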
@@ -6001,7 +5855,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg19 = "invalid new tag name";
const char* msg20 = "table is not super table";
const char* msg21 = "only binary/nchar column length could be modified";
- const char* msg22 = "new column length should be bigger than old one";
const char* msg23 = "only column length coulbe be modified";
const char* msg24 = "invalid binary/nchar column length";
@@ -6053,8 +5906,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
- if (!validateOneTags(pCmd, p)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
+ int32_t ret = validateOneTag(pCmd, p);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
}
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
@@ -6231,8 +6085,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
- if (!validateOneColumn(pCmd, p)) {
- return TSDB_CODE_TSC_INVALID_OPERATION;
+ int32_t ret = validateOneColumn(pCmd, p);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
}
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
@@ -6295,7 +6150,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
if (pItem->bytes <= pColSchema->bytes) {
- return invalidOperationMsg(pMsg, msg22);
+ return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, pMsg, pItem->name, NULL);
}
SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
@@ -6346,7 +6201,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
if (pItem->bytes <= pColSchema->bytes) {
- return invalidOperationMsg(pMsg, msg22);
+ return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_TAG_LENGTH, pMsg, pItem->name, NULL);
}
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
@@ -6429,7 +6284,9 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
int32_t f = pExpr->base.functionId;
- if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE) {
+ if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE ||
+ f == TSDB_FUNC_CEIL || f == TSDB_FUNC_FLOOR || f == TSDB_FUNC_ROUND)
+ {
isProjectionFunction = true;
break;
}
@@ -7031,6 +6888,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
const char* msg2 = "aggregation function should not be mixed up with projection";
bool tagTsColExists = false;
+ int16_t numOfScalar = 0;
int16_t numOfSelectivity = 0;
int16_t numOfAggregation = 0;
@@ -7064,6 +6922,8 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
numOfSelectivity++;
+ } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) {
+ numOfScalar++;
} else {
numOfAggregation++;
}
@@ -7136,7 +6996,6 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
const char* msg1 = "interval not allowed in group by normal column";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
SSchema* tagSchema = NULL;
@@ -7162,9 +7021,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
s = &pSchema[colIndex];
}
}
-
- size_t size = tscNumOfExprs(pQueryInfo);
-
+
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
int32_t f = TSDB_FUNC_TAG;
@@ -7172,8 +7029,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
f = TSDB_FUNC_TAGPRJ;
}
+ int32_t pos = tscGetFirstInvisibleFieldPos(pQueryInfo);
+
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
- SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
+ SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));
@@ -7183,13 +7042,15 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
// NOTE: tag column does not add to source column list
SColumnList ids = createColumnList(1, 0, pColIndex->colIndex);
- insertResultField(pQueryInfo, (int32_t)size, &ids, s->bytes, (int8_t)s->type, s->name, pExpr);
+ insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, s->name, pExpr);
} else {
// if this query is "group by" normal column, time window query is not allowed
if (isTimeWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
+ size_t size = tscNumOfExprs(pQueryInfo);
+
bool hasGroupColumn = false;
for (int32_t j = 0; j < size; ++j) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, j);
@@ -8078,7 +7939,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect
}
static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t sqlOptr) {
- const char* msg1 = "non binary column not support like operator";
+ const char* msg1 = "non binary column not support like/match operator";
const char* msg2 = "invalid operator for binary column in having clause";
const char* msg3 = "invalid operator for bool column in having clause";
@@ -8130,11 +7991,13 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, S
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
+ && pExpr->tokenId != TK_MATCH
+ && pExpr->tokenId != TK_NMATCH
) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
- if (pExpr->tokenId == TK_LIKE) {
+ if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@@ -8417,7 +8280,10 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
size_t len = strlen(name);
- taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity);
+ if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity)) {
+ // not found
+ tfree(pTableMeta);
+ }
if (pTableMeta && pTableMeta->id.uid > 0) {
tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
@@ -8588,7 +8454,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
if (p->vgroupIdList != NULL) {
size_t s = taosArrayGetSize(p->vgroupIdList);
- size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo);
+ size_t vgroupsz = sizeof(SVgroupMsg) * s + sizeof(SVgroupsInfo);
pTableMetaInfo->vgroupList = calloc(1, vgroupsz);
if (pTableMetaInfo->vgroupList == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -8603,14 +8469,11 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod
taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo);
assert(existVgroupInfo.inUse >= 0);
- SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j];
+ SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j];
pVgroup->numOfEps = existVgroupInfo.numOfEps;
pVgroup->vgId = existVgroupInfo.vgId;
- for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) {
- pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port;
- pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN);
- }
+ memcpy(&pVgroup->epAddr, &existVgroupInfo.ep, sizeof(pVgroup->epAddr));
}
}
}
@@ -8648,6 +8511,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
n += 1;
}
+ info->numOfColumns = n;
+
return meta;
}
@@ -8659,6 +8524,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
if (taosArrayGetSize(subInfo->pSubquery) >= 2) {
return invalidOperationMsg(msgBuf, "not support union in subquery");
}
+
SQueryInfo* pSub = calloc(1, sizeof(SQueryInfo));
tscInitQueryInfo(pSub);
@@ -8670,18 +8536,18 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
pSub->pUdfInfo = pUdfInfo;
pSub->udfCopy = true;
+ pSub->pDownstream = pQueryInfo;
int32_t code = validateSqlNode(pSql, p, pSub);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- pSub->pDownstream = pQueryInfo;
-
// create dummy table meta info
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
if (pTableMetaInfo1 == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
@@ -8734,8 +8600,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
- int32_t code = TSDB_CODE_SUCCESS;
-
+ int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -8748,7 +8613,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
* select server_status();
* select server_version();
* select client_version();
- * select current_database();
+ * select database();
*/
if (pSqlNode->from == NULL) {
assert(pSqlNode->fillType == NULL && pSqlNode->pGroupby == NULL && pSqlNode->pWhere == NULL &&
@@ -8757,7 +8622,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
if (pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
- clearAllTableMetaInfo(pQueryInfo, false);
+ clearAllTableMetaInfo(pQueryInfo, false, pSql->self);
pQueryInfo->numOfTables = 0;
// parse the subquery in the first place
@@ -8766,7 +8631,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
// check if there is 3 level select
SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
- if (p->from->type == SQL_NODE_FROM_SUBQUERY){
+ if (p->from->type == SQL_NODE_FROM_SUBQUERY) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
@@ -8784,6 +8649,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, timeWindowQuery, true) !=
TSDB_CODE_SUCCESS) {
@@ -8858,6 +8724,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
+ // disable group result mixed up if interval/session window query exists.
+ if (isTimeWindowQuery(pQueryInfo)) {
+ size_t num = taosArrayGetSize(pQueryInfo->pUpstream);
+ for(int32_t i = 0; i < num; ++i) {
+ SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, i);
+ pUp->multigroupResult = false;
+ }
+ }
+
// parse the having clause in the first place
int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
@@ -9026,8 +8901,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
- //pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0);
-
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
@@ -9089,13 +8962,17 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
(*pExpr)->pVal = calloc(1, sizeof(tVariant));
tVariantAssign((*pExpr)->pVal, &pSqlExpr->value);
- STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
- if (pCols != NULL && taosArrayGetSize(pCols) > 0) {
- SColIndex* idx = taosArrayGet(pCols, 0);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
- // convert time by precision
- if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) {
- ret = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, (*pExpr)->pVal);
+ STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, pQueryInfo->curTableIdx)->pTableMeta;
+ if (pCols != NULL) {
+ size_t colSize = taosArrayGetSize(pCols);
+
+ if (colSize > 0) {
+ SColIndex* idx = taosArrayGet(pCols, colSize - 1);
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
+ // convert time by precision
+ if (pSchema != NULL && TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && TSDB_DATA_TYPE_BINARY == (*pExpr)->pVal->nType) {
+ ret = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, (*pExpr)->pVal);
+ }
}
}
return ret;
@@ -9138,8 +9015,18 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
(*pExpr)->nodeType = TSQL_NODE_COL;
(*pExpr)->pSchema = calloc(1, sizeof(SSchema));
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
- *(*pExpr)->pSchema = *pSchema;
+ SSchema* pSchema = NULL;
+
+ if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ pSchema = (*pExpr)->pSchema;
+ strcpy(pSchema->name, TSQL_TBNAME_L);
+ pSchema->type = TSDB_DATA_TYPE_BINARY;
+ pSchema->colId = TSDB_TBNAME_COLUMN_INDEX;
+ pSchema->bytes = -1;
+ } else {
+ pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
+ *(*pExpr)->pSchema = *pSchema;
+ }
if (pCols != NULL) { // record the involved columns
SColIndex colIndex = {0};
@@ -9160,9 +9047,13 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
if (colSize > 0) {
SColIndex* idx = taosArrayGet(pCols, colSize - 1);
- SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
- if (pSchema != NULL) {
- colType = pSchema->type;
+ if (idx->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
+ colType = TSDB_DATA_TYPE_BINARY;
+ } else {
+ SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
+ if (pSchema != NULL) {
+ colType = pSchema->type;
+ }
}
}
}
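
The last hunk above synthesizes a schema for the tbname pseudo column instead of looking one up in the table meta. A small sketch of that shape, with demo constants standing in for TSQL_TBNAME_L, TSDB_DATA_TYPE_BINARY and TSDB_TBNAME_COLUMN_INDEX (the values here are stand-ins, not definitions from this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_TBNAME_COL_INDEX (-1)   /* stand-in for TSDB_TBNAME_COLUMN_INDEX */
#define DEMO_TYPE_BINARY      8      /* stand-in for TSDB_DATA_TYPE_BINARY    */

typedef struct {
    char    name[64];
    int8_t  type;
    int16_t colId;
    int16_t bytes;
} DemoSchema;

int main(void) {
    DemoSchema s = {0};
    strcpy(s.name, "tbname");        /* TSQL_TBNAME_L in the real code */
    s.type  = DEMO_TYPE_BINARY;
    s.colId = DEMO_TBNAME_COL_INDEX;
    s.bytes = -1;                    /* width is not fixed for the pseudo column */
    printf("%s type=%d colId=%d bytes=%d\n", s.name, s.type, s.colId, s.bytes);
    return 0;
}
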
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index f0ee180bbe490851754e63bb489cdd5f9ad60afb..dcfbc857d5d6792b3796a098ea61046439fc5d0f 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -73,7 +73,7 @@ static int32_t removeDupVgid(int32_t *src, int32_t sz) {
return ret;
}
-static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
+static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupMsg* pVgroupInfo) {
assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
// Issue the query to one of the vnode among a vgroup randomly.
@@ -93,6 +93,7 @@ static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
existed = true;
}
}
+
assert(existed);
}
@@ -331,22 +332,42 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.handle = NULL,
.code = 0
};
-
rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid);
return TSDB_CODE_SUCCESS;
}
-static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
- SRpcMsg* rpcMsg = pSchedMsg->ahandle;
- SRpcEpSet* pEpSet = pSchedMsg->thandle;
+// handle three situations
+// 1. epset retry: only return the last failed ep
+// 2. no epset retry, e.g. 'taos -h invalidFqdn': return invalidFqdn
+// 3. other situations are not expected
+void tscSetFqdnErrorMsg(SSqlObj* pSql, SRpcEpSet* pEpSet) {
+ SSqlCmd* pCmd = &pSql->cmd;
+ SSqlRes* pRes = &pSql->res;
+
+ char* msgBuf = tscGetErrorMsgPayload(pCmd);
+
+ if (pEpSet) {
+ sprintf(msgBuf, "%s\"%s\"", tstrerror(pRes->code),pEpSet->fqdn[(pEpSet->inUse)%(pEpSet->numOfEps)]);
+ } else if (pCmd->command >= TSDB_SQL_MGMT) {
+ SRpcEpSet tEpset;
+
+ SRpcCorEpSet *pCorEpSet = pSql->pTscObj->tscCorMgmtEpSet;
+ taosCorBeginRead(&pCorEpSet->version);
+ tEpset = pCorEpSet->epSet;
+ taosCorEndRead(&pCorEpSet->version);
+
+ sprintf(msgBuf, "%s\"%s\"", tstrerror(pRes->code),tEpset.fqdn[(tEpset.inUse)%(tEpset.numOfEps)]);
+ } else {
+ sprintf(msgBuf, "%s", tstrerror(pRes->code));
+ }
+}
+void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
if (pSql == NULL) {
rpcFreeCont(rpcMsg->pCont);
- free(rpcMsg);
- free(pEpSet);
return;
}
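
tscSetFqdnErrorMsg (added above) appends the failing endpoint to the error text; the index arithmetic is just inUse modulo numOfEps, which stays in range even after retries have advanced inUse past the array. A toy illustration with simplified types (DemoEpSet and the node names are hypothetical):

#include <stdio.h>

#define DEMO_FQDN_LEN 128

typedef struct {
    int  inUse;
    int  numOfEps;
    char fqdn[3][DEMO_FQDN_LEN];
} DemoEpSet;

int main(void) {
    DemoEpSet ep = { .inUse = 4, .numOfEps = 3,
                     .fqdn = { "node-a", "node-b", "node-c" } };
    /* the modulo keeps the index valid even if inUse has advanced past the array */
    printf("Unable to resolve FQDN \"%s\"\n", ep.fqdn[ep.inUse % ep.numOfEps]);
    return 0;
}
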
@@ -357,28 +378,23 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
SSqlCmd *pCmd = &pSql->cmd;
pSql->rpcRid = -1;
-
if (pObj->signature != pObj) {
tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature);
taosRemoveRef(tscObjRef, handle);
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
- free(rpcMsg);
- free(pEpSet);
return;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) {
tscDebug("0x%"PRIx64" sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
- pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
+ pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
taosRemoveRef(tscObjRef, handle);
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
- free(rpcMsg);
- free(pEpSet);
return;
}
@@ -407,9 +423,9 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
// 1. super table subquery
// 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
- TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
- !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
- (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) {
+ TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
+ !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
+ (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
// do nothing in case of super table subquery
} else {
pSql->retry += 1;
@@ -432,8 +448,6 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
- free(rpcMsg);
- free(pEpSet);
return;
}
}
@@ -485,7 +499,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
pRes->numOfRows += pMsg->affectedRows;
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql->self, sqlCmd[pCmd->command],
- tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen);
+ tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen);
} else {
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen);
}
@@ -500,16 +514,13 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
if (rpcMsg->code != TSDB_CODE_SUCCESS) {
pRes->code = rpcMsg->code;
}
+
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
- if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) {
- if (pEpSet) {
- char buf[TSDB_FQDN_LEN + 64] = {0};
- tscAllocPayload(pCmd, sizeof(buf));
- sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code),pEpSet->fqdn[(pEpSet->inUse)%(pEpSet->numOfEps)]);
- } else {
- sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code));
- }
+ if (rpcMsg->code == TSDB_CODE_RPC_FQDN_ERROR) {
+ tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64);
+ tscSetFqdnErrorMsg(pSql, pEpSet);
}
+
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);
}
@@ -520,29 +531,6 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
taosReleaseRef(tscObjRef, handle);
rpcFreeCont(rpcMsg->pCont);
- free(rpcMsg);
- free(pEpSet);
-}
-
-void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
- SSchedMsg schedMsg = {0};
-
- schedMsg.fp = doProcessMsgFromServer;
-
- SRpcMsg* rpcMsgCopy = calloc(1, sizeof(SRpcMsg));
- memcpy(rpcMsgCopy, rpcMsg, sizeof(struct SRpcMsg));
- schedMsg.ahandle = (void*)rpcMsgCopy;
-
- SRpcEpSet* pEpSetCopy = NULL;
- if (pEpSet != NULL) {
- pEpSetCopy = calloc(1, sizeof(SRpcEpSet));
- memcpy(pEpSetCopy, pEpSet, sizeof(SRpcEpSet));
- }
-
- schedMsg.thandle = (void*)pEpSetCopy;
- schedMsg.msg = NULL;
-
- taosScheduleTask(tscQhandle, &schedMsg);
}
int doBuildAndSendMsg(SSqlObj *pSql) {
@@ -715,8 +703,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
}
}
- return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize +
- tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
+ return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize +
+ exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
}
static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg,
@@ -731,7 +719,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
- SVgroupInfo* pVgroupInfo = NULL;
+ SVgroupMsg* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
@@ -869,8 +857,8 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
(*pMsg) += sizeof(SSqlExpr);
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
- pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType);
- pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen);
+ pSqlExpr->param[j].nType = htonl(pExpr->param[j].nType);
+ pSqlExpr->param[j].nLen = htonl(pExpr->param[j].nLen);
if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen);
@@ -888,25 +876,30 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
+ SQueryInfo *pQueryInfo = NULL;
+ STableMeta *pTableMeta = NULL;
+ STableMetaInfo *pTableMetaInfo = NULL;
+
int32_t code = TSDB_CODE_SUCCESS;
int32_t size = tscEstimateQueryMsgSize(pSql);
+ assert(size > 0);
- if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
+ if (TSDB_CODE_SUCCESS != tscAllocPayloadFast(pCmd, size)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this
}
- SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
+ pQueryInfo = tscGetQueryInfo(pCmd);
+ pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+ pTableMeta = pTableMetaInfo->pTableMeta;
SQueryAttr query = {{0}};
tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql);
+ query.vgId = pTableMeta->vgId;
SArray* tableScanOperator = createTableScanPlan(&query);
SArray* queryOperator = createExecOperatorPlan(&query);
- STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
-
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));
@@ -949,18 +942,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->pointInterpQuery = query.pointInterpQuery;
pQueryMsg->needReverseScan = query.needReverseScan;
pQueryMsg->stateWindow = query.stateWindow;
-
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->sqlstrLen = htonl(sqlLen);
pQueryMsg->sw.gap = htobe64(query.sw.gap);
pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX);
pQueryMsg->secondStageOutput = htonl(query.numOfExpr2);
- pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number
+ pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
- pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
- pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len);
pQueryMsg->queryType = htonl(pQueryInfo->type);
pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen);
@@ -976,7 +966,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tableCols[i].type = htons(pCol->type);
//pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters);
pQueryMsg->tableCols[i].flist.numOfFilters = 0;
-
+ pQueryMsg->tableCols[i].flist.filterInfo = 0;
// append the filter information after the basic column information
//serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg);
}
@@ -989,6 +979,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pCond->len;
}
+ } else {
+ pQueryMsg->colCondLen = 0;
}
for (int32_t i = 0; i < query.numOfOutput; ++i) {
@@ -1068,6 +1060,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pCond->len;
}
+ } else {
+ pQueryMsg->tagCondLen = 0;
}
if (pQueryInfo->bufLen > 0) {
@@ -1075,12 +1069,6 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pQueryInfo->bufLen;
}
- SCond* pCond = &pQueryInfo->tagCond.tbnameCond;
- if (pCond->len > 0) {
- strncpy(pMsg, pCond->cond, pCond->len);
- pMsg += pCond->len;
- }
-
// compressed ts block
pQueryMsg->tsBuf.tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
@@ -1097,6 +1085,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder);
pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
+ } else {
+ pQueryMsg->tsBuf.tsLen = 0;
+ pQueryMsg->tsBuf.tsNumOfBlocks = 0;
}
int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator);
@@ -1134,6 +1125,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pUdfInfo->contLen;
}
+ } else {
+ pQueryMsg->udfContentOffset = 0;
+ pQueryMsg->udfContentLen = 0;
}
memcpy(pMsg, pSql->sqlstr, sqlLen);
@@ -1411,7 +1405,6 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
}
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW;
pCmd->payloadLen = sizeof(SShowMsg) + 100;
@@ -1434,9 +1427,9 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
if (tNameIsEmpty(&pTableMetaInfo->name)) {
- pthread_mutex_lock(&pObj->mutex);
- tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
- pthread_mutex_unlock(&pObj->mutex);
+ char *p = cloneCurrentDBName(pSql);
+ tstrncpy(pShowMsg->db, p, sizeof(pShowMsg->db));
+ tfree(p);
} else {
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
@@ -2155,7 +2148,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
*size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg));
- size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
+ size_t vgroupsz = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
SVgroupsInfo *pVgroupInfo = calloc(1, vgroupsz);
assert(pVgroupInfo != NULL);
@@ -2165,7 +2158,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
} else {
for (int32_t j = 0; j < pVgroupInfo->numOfVgroups; ++j) {
// just init, no need to lock
- SVgroupInfo *pVgroup = &pVgroupInfo->vgroups[j];
+ SVgroupMsg *pVgroup = &pVgroupInfo->vgroups[j];
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
vmsg->vgId = htonl(vmsg->vgId);
@@ -2177,7 +2170,8 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t
pVgroup->vgId = vmsg->vgId;
for (int32_t k = 0; k < vmsg->numOfEps; ++k) {
pVgroup->epAddr[k].port = vmsg->epAddr[k].port;
- pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
+ tstrncpy(pVgroup->epAddr[k].fqdn, vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
+// pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN);
}
doUpdateVgroupInfo(pVgroup->vgId, vmsg);
@@ -2269,6 +2263,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
pMsg = buf;
}
+ if (pParentCmd->pTableMetaMap == NULL) {
+ pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ }
+
for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
int32_t code = tableMetaMsgConvert(pMetaMsg);
@@ -2605,7 +2603,7 @@ int tscProcessDropDbRsp(SSqlObj *pSql) {
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
- tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
+ tscRemoveCachedTableMeta(pTableMetaInfo, pSql->self);
tfree(pTableMetaInfo->pTableMeta);
return 0;
}
@@ -2623,7 +2621,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
tfree(pTableMetaInfo->pTableMeta);
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
- taosHashClear(tscTableMetaMap);
+ if (pSql->res.pRsp == NULL) {
+ tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name);
+ return 0;
+ }
+ return tscProcessTableMetaRsp(pSql);
}
return 0;
@@ -2656,6 +2658,53 @@ int tscProcessQueryRsp(SSqlObj *pSql) {
return 0;
}
+static void decompressQueryColData(SSqlObj *pSql, SSqlRes *pRes, SQueryInfo* pQueryInfo, char **data, int8_t compressed, int32_t compLen) {
+ int32_t decompLen = 0;
+ int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
+ int32_t *compSizes;
+ char *pData = *data;
+ compSizes = (int32_t *)(pData + compLen);
+
+ TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, numOfCols - 1);
+ int16_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1);
+ char *outputBuf = tcalloc(pRes->numOfRows, (pField->bytes + offset));
+
+ char *p = outputBuf;
+ int32_t bufOffset;
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
+ bufOffset = pInfo->field.bytes * pRes->numOfRows;
+
+ int32_t flen = (*(tDataTypes[pInfo->field.type].decompFunc))(pData, htonl(compSizes[i]), pRes->numOfRows, p, bufOffset,
+ compressed, NULL, 0);
+
+ p += flen;
+    decompLen += flen;
+ pData += htonl(compSizes[i]);
+ }
+
+ /* Resize rsp as decompressed data will occupy more space */
+ pRes->rspLen = pRes->rspLen - (compLen + numOfCols * sizeof(int32_t)) + decompLen;
+ char *new_rsp = (char *)realloc(pRes->pRsp, pRes->rspLen);
+ if (new_rsp == NULL) {
+ pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ return;
+ } else {
+ pRes->pRsp = new_rsp;
+ *data = ((SRetrieveTableRsp *)pRes->pRsp)->data;
+ pData = *data + compLen + numOfCols * sizeof(int32_t);
+ }
+
+ tscDebug("0x%"PRIx64" decompress col data, compressed size:%d, decompressed size:%d",
+ pSql->self, (int32_t)(compLen + numOfCols * sizeof(int32_t)), decompLen);
+
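+  // shift whatever trails the compressed block so it follows the decompressed
+  // data, then copy the decompressed columns into place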
+ int32_t tailLen = pRes->rspLen - sizeof(SRetrieveTableRsp) - decompLen;
+ memmove(*data + decompLen, pData, tailLen);
+ memmove(*data, outputBuf, decompLen);
+
+ tfree(outputBuf);
+}
+
int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
@@ -2668,18 +2717,24 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
return pRes->code;
}
- pRes->numOfRows = htonl(pRetrieve->numOfRows);
- pRes->precision = htons(pRetrieve->precision);
- pRes->offset = htobe64(pRetrieve->offset);
- pRes->useconds = htobe64(pRetrieve->useconds);
- pRes->completed = (pRetrieve->completed == 1);
- pRes->data = pRetrieve->data;
-
+ pRes->numOfRows = htonl(pRetrieve->numOfRows);
+ pRes->precision = htons(pRetrieve->precision);
+ pRes->offset = htobe64(pRetrieve->offset);
+ pRes->useconds = htobe64(pRetrieve->useconds);
+ pRes->completed = (pRetrieve->completed == 1);
+ pRes->data = pRetrieve->data;
+
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
return pRes->code;
}
+  // decompress the column data if the server sent it compressed
+ if (pRetrieve->compressed) {
+ int32_t compLen = htonl(pRetrieve->compLen);
+ decompressQueryColData(pSql, pRes, pQueryInfo, &pRes->data, pRetrieve->compressed, compLen);
+ }
+
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if ((pCmd->command == TSDB_SQL_RETRIEVE) ||
((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) &&
@@ -2692,10 +2747,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
if (pSql->pSubscription != NULL) {
int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
-
+
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, numOfCols - 1);
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, numOfCols - 1);
-
+
char* p = pRes->data + (pField->bytes + offset) * pRes->numOfRows;
int32_t numOfTables = htonl(*(int32_t*)p);
@@ -2875,11 +2930,15 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
// just make runtime happy
if (pTableMetaInfo->tableMetaCapacity != 0 && pTableMetaInfo->pTableMeta != NULL) {
memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
- }
- taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);
+ }
+
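+  // a NULL return means the cache lookup failed; free the stale clone so it
+  // is not mistaken for a valid table meta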
+ if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity)) {
+ tfree(pTableMetaInfo->pTableMeta);
+ }
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf);
+
if (pMeta && pMeta->id.uid > 0) {
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
@@ -2889,6 +2948,8 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
}
+
+ tscDebug("0x%"PRIx64 " %s retrieve tableMeta from cache, numOfCols:%d, numOfTags:%d", pSql->self, name, pMeta->tableInfo.numOfColumns, pMeta->tableInfo.numOfTags);
return TSDB_CODE_SUCCESS;
}
@@ -2990,13 +3051,11 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
- // remove stored tableMeta info in hash table
- tscRemoveTableMetaBuf(pTableMetaInfo, pSql->self);
- pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
- pCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ // remove stored tableMeta info in hash table
+ tscResetSqlCmd(pCmd, true, pSql->self);
- SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
+ SArray* pNameList = taosArrayInit(1, POINTER_BYTES);
SArray* vgroupList = taosArrayInit(1, POINTER_BYTES);
char* n = strdup(name);
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 5f55e1c50d3ef75d6036a7d71419f142bff8cfb9..5fdaad0d667c19548f699a9a8cfed7c9f017ad1b 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -892,7 +892,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 2c4bc5f76463ee6bc811e9b6fa3631b534f64478..9f2b79e891ed303a891f87e40fc29802714a4f5a 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -113,7 +113,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
pQueryInfo->command = TSDB_SQL_SELECT;
- pSql->fp = tscProcessStreamQueryCallback;
+ pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
executeQuery(pSql, pQueryInfo);
tscIncStreamExecutionCount(pStream);
@@ -142,6 +142,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
if(pSql == NULL) {
return ;
}
+
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
tscDebug("0x%"PRIx64" add into timer", pSql->self);
@@ -186,14 +187,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
// launch stream computing in a new thread
- SSchedMsg schedMsg = { 0 };
- schedMsg.fp = tscProcessStreamLaunchQuery;
+ SSchedMsg schedMsg = {0};
+ schedMsg.fp = tscProcessStreamLaunchQuery;
schedMsg.ahandle = pStream;
schedMsg.thandle = (void *)1;
- schedMsg.msg = NULL;
+ schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
}
+static void cbParseSql(void* param, TAOS_RES* res, int code);
+
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
@@ -201,24 +204,26 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
pStream, numOfRows, retryDelay);
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0);
-
- char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, name);
-
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
-
- tfree(pTableMetaInfo->pTableMeta);
+ SSqlObj* pSql = pStream->pSql;
- tscFreeSqlResult(pStream->pSql);
- tscFreeSubobj(pStream->pSql);
- tfree(pStream->pSql->pSubs);
- pStream->pSql->subState.numOfSub = 0;
+ tscFreeSqlResult(pSql);
+ tscFreeSubobj(pSql);
+ tfree(pSql->pSubs);
+ pSql->subState.numOfSub = 0;
- pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
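+    // on a failed stream query, re-parse and re-launch the SQL from scratch
+    // instead of only invalidating the cached table meta and vgroup list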
+ int32_t code = tsParseSql(pSql, true);
+ if (code == TSDB_CODE_SUCCESS) {
+ cbParseSql(pStream, pSql, code);
+ } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+ tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
+ } else {
+ tscError("0x%"PRIx64" open stream failed, code:%s", pSql->self, tstrerror(code));
+ taosReleaseRef(tscObjRef, pSql->self);
+ free(pStream);
+ }
- tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
- return;
+// tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
+// return;
}
taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param);
@@ -555,7 +560,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
-
pStream->fp(pStream->param, NULL, NULL);
return;
}
@@ -582,9 +586,10 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
// set stime with ltime if ltime > stime
const char* dstTable = pStream->dstTable? pStream->dstTable: "";
- tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime);
+ tscDebug("0x%"PRIx64" CQ table %s ltime is %"PRId64, pSql->self, dstTable, pStream->ltime);
+
if(pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) {
- tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime);
+ tscWarn("0x%"PRIx64" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime > 0", pSql->self, dstTable, pStream->stime, pStream->ltime);
pStream->stime = pStream->ltime;
}
@@ -592,7 +597,6 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
pCmd->command = TSDB_SQL_SELECT;
tscAddIntoStreamList(pStream);
-
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("0x%"PRIx64" stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql->self,
@@ -659,10 +663,9 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
char sql[128] = "";
sprintf(sql, "select last_row(*) from %s;", pStream->dstTable);
taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param);
- return ;
}
-TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return NULL;
@@ -697,14 +700,12 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pStream->param = param;
pStream->pSql = pSql;
pStream->cqhandle = cqhandle;
- pSql->pStream = pStream;
- pSql->param = pStream;
- pSql->maxRetry = TSDB_MAX_REPLICA;
tscSetStreamDestTable(pStream, dstTable);
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;
+
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
@@ -725,14 +726,13 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pSql->fp = cbParseSql;
pSql->fetchFp = cbParseSql;
-
registerSqlObj(pSql);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_SUCCESS) {
cbParseSql(pStream, pSql, code);
} else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
- tscDebug(" CQ taso_open_stream IN Process. sql=%s", sqlstr);
+ tscDebug("0x%"PRIx64" CQ taso_open_stream IN Process", pSql->self);
} else {
tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
taosReleaseRef(tscObjRef, pSql->self);
@@ -743,7 +743,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
return pStream;
}
-TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
int64_t stime, void *param, void (*callback)(void *)) {
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
}
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 0d26ec58f68b02cf7e04eccad19c1efff8f16373..99a2a79dc60c89530eb9c2c7f6b5645ca0133ba1 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -15,8 +15,9 @@
#define _GNU_SOURCE
#include "os.h"
-
#include "texpr.h"
+
+#include "tsched.h"
#include "qTsbuf.h"
#include "tcompare.h"
#include "tscLog.h"
@@ -622,13 +623,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value
-#ifndef _TD_NINGSI_60
- pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)};
-#else
- pExpr->base.param[0].i64 = colId;
- pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT;
- pExpr->base.param[0].nLen = sizeof(int64_t);
-#endif
+ tVariant* pVariant = &pExpr->base.param[0];
+
+ pVariant->i64 = colId;
+ pVariant->nType = TSDB_DATA_TYPE_BIGINT;
+ pVariant->nLen = sizeof(int64_t);
+
pExpr->base.numOfParams = 1;
}
@@ -747,10 +747,11 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
SVgroupTableInfo info = {{0}};
for (int32_t m = 0; m < pvg->numOfVgroups; ++m) {
if (tt->vgId == pvg->vgroups[m].vgId) {
- tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]);
+ memcpy(&info.vgInfo, &pvg->vgroups[m], sizeof(info.vgInfo));
break;
}
}
+
assert(info.vgInfo.numOfEps != 0);
vgTables = taosArrayInit(4, sizeof(STableIdInfo));
@@ -1346,7 +1347,11 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pCmd->command = TSDB_SQL_SELECT;
tscResetForNextRetrieve(&pSql->res);
- assert(pSupporter->f == NULL);
+ if (pSupporter->f != NULL) {
+ fclose(pSupporter->f);
+ pSupporter->f = NULL;
+ }
+
taosGetTmpfilePath("ts-join", pSupporter->path);
// TODO check for failure
@@ -2038,17 +2043,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
tscAsyncResultOnError(pSql);
}
-static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL);
-
- SRetrieveSupport* pSupport = pSub->param;
-
- tfree(pSupport->localBuffer);
- tfree(pSupport);
+
+ tscFreeRetrieveSup(pSub);
taos_free_result(pSub);
}
@@ -2406,6 +2408,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} else {
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
+ int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ assert(ti >= 0);
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
+ tscColumnCopy(x, pCol);
}
}
}
@@ -2433,11 +2439,72 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
return terrno;
}
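+// half-open range [first, second) of subquery indexes that a single scheduled
+// task is responsible for launching, see doSendQueryReqs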
+typedef struct SPair {
+ int32_t first;
+ int32_t second;
+} SPair;
+
+static void doSendQueryReqs(SSchedMsg* pSchedMsg) {
+ SSqlObj* pSql = pSchedMsg->ahandle;
+ SPair* p = pSchedMsg->msg;
+
+ for (int32_t i = p->first; i < p->second; ++i) {
+ if (i >= pSql->subState.numOfSub) {
+ tfree(p);
+ return;
+ }
+ SSqlObj* pSub = pSql->pSubs[i];
+ SRetrieveSupport* pSupport = pSub->param;
+
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch subquery, orderOfSub:%d.", pSql->self, pSub->self, pSupport->subqueryIndex);
+ tscBuildAndSendRequest(pSub, NULL);
+ }
+
+ tfree(p);
+}
+
+static void doConcurrentlySendSubQueries(SSqlObj* pSql) {
+ SSubqueryState *pState = &pSql->subState;
+
+ // concurrently sent the query requests.
+ const int32_t MAX_REQUEST_PER_TASK = 4;
+
+ int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK;
+ assert(numOfTasks >= 1);
+
+ int32_t num;
+ if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) {
+ num = MAX_REQUEST_PER_TASK;
+ } else {
+ num = pState->numOfSub / numOfTasks + 1;
+ }
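+  // each task launches roughly MAX_REQUEST_PER_TASK subqueries; for example,
+  // with numOfSub = 10 the ranges are [0,4), [4,8) and [8,10), since the last
+  // task's range is clamped to numOfSub below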
+ tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks);
+
+ for(int32_t j = 0; j < numOfTasks; ++j) {
+ SSchedMsg schedMsg = {0};
+ schedMsg.fp = doSendQueryReqs;
+ schedMsg.ahandle = (void*)pSql;
+
+ schedMsg.thandle = NULL;
+ SPair* p = calloc(1, sizeof(SPair));
+ p->first = j * num;
+
+ if (j == numOfTasks - 1) {
+ p->second = pState->numOfSub;
+ } else {
+ p->second = (j + 1) * num;
+ }
+
+ schedMsg.msg = p;
+ taosScheduleTask(tscQhandle, &schedMsg);
+ }
+}
+
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
- // pRes->code check only serves in launching metric sub-queries
+ // pRes->code check only serves in launching super table sub-queries
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
pCmd->command = TSDB_SQL_RETRIEVE_GLOBALMERGE; // enable the abort of kill super table function.
return pRes->code;
@@ -2448,22 +2515,23 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
pRes->qId = 0x1; // hack the qhandle check
- const uint32_t nBufferSize = (1u << 18u); // 256KB
+ const uint32_t nBufferSize = (1u << 18u); // 256KB, default buffer size
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
SSubqueryState *pState = &pSql->subState;
- pState->numOfSub = 0;
- if (pTableMetaInfo->pVgroupTables == NULL) {
- pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups;
- } else {
- pState->numOfSub = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
+ int32_t numOfSub = (pTableMetaInfo->pVgroupTables == NULL) ? pTableMetaInfo->vgroupList->numOfVgroups
+ : (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
+
+ int32_t ret = doInitSubState(pSql, numOfSub);
+ if (ret != 0) {
+ tscAsyncResultOnError(pSql);
+ return ret;
}
- assert(pState->numOfSub > 0);
-
- int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
+ ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self);
if (ret != 0) {
pRes->code = ret;
tscAsyncResultOnError(pSql);
@@ -2473,32 +2541,6 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
}
tscDebug("0x%"PRIx64" retrieved query data from %d vnode(s)", pSql->self, pState->numOfSub);
- pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES);
- if (pSql->pSubs == NULL) {
- tfree(pSql->pSubs);
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub);
-
- tscAsyncResultOnError(pSql);
- return ret;
- }
-
- if (pState->states == NULL) {
- pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
- if (pState->states == NULL) {
- pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub);
-
- tscAsyncResultOnError(pSql);
- return ret;
- }
-
- pthread_mutex_init(&pState->mutex, NULL);
- }
-
- memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub);
- tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self);
-
pRes->code = TSDB_CODE_SUCCESS;
int32_t i = 0;
@@ -2512,15 +2554,16 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->pExtMemBuffer = pMemoryBuf;
trs->pOrderDescriptor = pDesc;
- trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
+ trs->localBuffer = (tFilePage *)malloc(nBufferSize + sizeof(tFilePage));
if (trs->localBuffer == NULL) {
tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
tfree(trs);
break;
}
-
- trs->subqueryIndex = i;
- trs->pParentSql = pSql;
+
+ trs->localBuffer->num = 0;
+ trs->subqueryIndex = i;
+ trs->pParentSql = pSql;
SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
if (pNew == NULL) {
@@ -2555,19 +2598,12 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
doCleanupSubqueries(pSql, i);
return pRes->code;
}
-
- for(int32_t j = 0; j < pState->numOfSub; ++j) {
- SSqlObj* pSub = pSql->pSubs[j];
- SRetrieveSupport* pSupport = pSub->param;
-
- tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch subquery, orderOfSub:%d.", pSql->self, pSub->self, pSupport->subqueryIndex);
- tscBuildAndSendRequest(pSub, NULL);
- }
+ doConcurrentlySendSubQueries(pSql);
return TSDB_CODE_SUCCESS;
}
-static void tscFreeRetrieveSup(SSqlObj *pSql) {
+void tscFreeRetrieveSup(SSqlObj *pSql) {
SRetrieveSupport *trsupport = pSql->param;
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
@@ -2620,7 +2656,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
int32_t subqueryIndex = trsupport->subqueryIndex;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
- SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
+ SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]);
@@ -2698,7 +2734,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
} else { // reach the maximum retry count, abort
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
- tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed, code:%s, orderOfSub:%d FAILED. no more retry, set global code:%s", pParentSql->self, pSql->self,
tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
}
}
@@ -2725,33 +2761,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
int32_t code = pParentSql->res.code;
- if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
- // remove the cached tableMeta and vgroup id list, and then parse the sql again
- SSqlCmd* pParentCmd = &pParentSql->cmd;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
- tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
+ SSqlObj *userSql = NULL;
+ if (pParentSql->param) {
+ userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql;
+ }
+
+ if (userSql == NULL) {
+ userSql = pParentSql;
+ }
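+      // userSql is the original user-issued statement; retry the whole query
+      // tree from there rather than from this intermediate subquery object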
+
+ if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
+ if (userSql != pParentSql) {
+ tscFreeRetrieveSup(pParentSql);
+ }
- pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
- pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ tscFreeSubobj(userSql);
+ tfree(userSql->pSubs);
- pParentSql->res.code = TSDB_CODE_SUCCESS;
- pParentSql->retry++;
+ userSql->res.code = TSDB_CODE_SUCCESS;
+ userSql->retry++;
- tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
- tstrerror(code), pParentSql->retry);
+ tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
+ tstrerror(code), userSql->retry);
- code = tsParseSql(pParentSql, true);
+ tscResetSqlCmd(&userSql->cmd, true, userSql->self);
+ code = tsParseSql(userSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
}
if (code != TSDB_CODE_SUCCESS) {
- pParentSql->res.code = code;
- tscAsyncResultOnError(pParentSql);
+ userSql->res.code = code;
+ tscAsyncResultOnError(userSql);
return;
}
- executeQuery(pParentSql, pQueryInfo);
+ pQueryInfo = tscGetQueryInfo(&userSql->cmd);
+ executeQuery(userSql, pQueryInfo);
} else {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
}
@@ -2821,7 +2867,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
- tscClearInterpInfo(pPQueryInfo);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;
@@ -2839,7 +2884,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->res.precision = pSql->res.precision;
pParentSql->res.numOfRows = 0;
pParentSql->res.row = 0;
- pParentSql->res.numOfGroups = 0;
tscFreeRetrieveSup(pSql);
@@ -2890,7 +2934,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SSubqueryState* pState = &pParentSql->subState;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
- SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
+ SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
@@ -2936,7 +2980,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
- if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
+ if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
@@ -3018,7 +3062,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
assert(pQueryInfo->numOfTables == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0);
- SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex];
+ SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex];
// stable query killed or other subquery failed, all query stopped
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
@@ -3040,7 +3084,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(code == taos_errno(pSql));
- if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID)) {
+ if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && (code != TSDB_CODE_TDB_INVALID_TABLE_ID && code != TSDB_CODE_VND_INVALID_VGROUP_ID)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0;
@@ -3151,7 +3195,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
numOfFailed += 1;
// clean up tableMeta in cache
- tscFreeQueryInfo(&pSql->cmd, false);
+ tscFreeQueryInfo(&pSql->cmd, false, pSql->self);
SQueryInfo* pQueryInfo = tscGetQueryInfoS(&pSql->cmd);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, 0);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
@@ -3173,7 +3217,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
pParentObj->res.code = TSDB_CODE_SUCCESS;
- tscResetSqlCmd(&pParentObj->cmd, false);
+ tscResetSqlCmd(&pParentObj->cmd, false, pParentObj->self);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
@@ -3364,7 +3408,6 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
return;
}
-// tscRestoreFuncForSTableQuery(pQueryInfo);
int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList);
assert(numOfRes * rowSize > 0);
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index c04765b0651f59066dd5897f2eaf0924b7113a21..b3b83db80a70c19f79d1cd6a732d729817436dd3 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -50,6 +50,7 @@ int tscLogFileNum = 10;
static pthread_mutex_t rpcObjMutex; // mutex to protect open the rpc obj concurrently
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
+static pthread_mutex_t setConfMutex = PTHREAD_MUTEX_INITIALIZER;
// pthread_once can not return result code, so result code is set to a global variable.
static volatile int tscInitRes = 0;
@@ -122,6 +123,10 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry
void taos_init_imp(void) {
char temp[128] = {0};
+
+  // In the APIs of other programming languages, taos_cleanup is not exposed yet.
+  // Register it via atexit() so the allocated resources are always released and the valgrind warning is suppressed.
+ atexit(taos_cleanup);
errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec());
@@ -197,10 +202,6 @@ void taos_init_imp(void) {
tscRefId = taosOpenRef(200, tscCloseTscObj);
- // In the APIs of other program language, taos_cleanup is not available yet.
- // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning.
- atexit(taos_cleanup);
-
tscDebug("client is initialized successfully");
}
@@ -249,6 +250,7 @@ void taos_cleanup(void) {
pthread_mutex_destroy(&rpcObjMutex);
}
+ pthread_mutex_destroy(&setConfMutex);
taosCacheCleanup(tscVgroupListBuf);
tscVgroupListBuf = NULL;
@@ -437,3 +439,66 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) {
atomic_store_32(&lock, 0);
return ret;
}
+
+#include "cJSON.h"
+static setConfRet taos_set_config_imp(const char *config){
+ setConfRet ret = {SET_CONF_RET_SUCC, {0}};
+ static bool setConfFlag = false;
+ if (setConfFlag) {
+ ret.retCode = SET_CONF_RET_ERR_ONLY_ONCE;
+ strcpy(ret.retMsg, "configuration can only set once");
+ return ret;
+ }
+ taosInitGlobalCfg();
+ cJSON *root = cJSON_Parse(config);
+ if (root == NULL){
+ ret.retCode = SET_CONF_RET_ERR_JSON_PARSE;
+ strcpy(ret.retMsg, "parse json error");
+ return ret;
+ }
+
+ int size = cJSON_GetArraySize(root);
+  if(!cJSON_IsObject(root) || size == 0) {
+    ret.retCode = SET_CONF_RET_ERR_JSON_INVALID;
+    strcpy(ret.retMsg, "json content is invalid, must be a non-empty object");
+    cJSON_Delete(root);   // release the parsed document on this error path
+    return ret;
+  }
+
+  if(size >= 1000) {
+    ret.retCode = SET_CONF_RET_ERR_TOO_LONG;
+    strcpy(ret.retMsg, "json object size is too large");
+    cJSON_Delete(root);   // release the parsed document on this error path
+    return ret;
+  }
+
+ for(int i = 0; i < size; i++){
+ cJSON *item = cJSON_GetArrayItem(root, i);
+    if(!item) {
+      ret.retCode = SET_CONF_RET_ERR_INNER;
+      strcpy(ret.retMsg, "inner error");
+      cJSON_Delete(root);   // release the parsed document on this error path
+      return ret;
+    }
+ if(!taosReadConfigOption(item->string, item->valuestring, NULL, NULL, TAOS_CFG_CSTATUS_OPTION, TSDB_CFG_CTYPE_B_CLIENT)){
+ ret.retCode = SET_CONF_RET_ERR_PART;
+ if (strlen(ret.retMsg) == 0){
+ snprintf(ret.retMsg, RET_MSG_LENGTH, "part error|%s", item->string);
+ }else{
+ int tmp = RET_MSG_LENGTH - 1 - (int)strlen(ret.retMsg);
+ size_t leftSize = tmp >= 0 ? tmp : 0;
+ strncat(ret.retMsg, "|", leftSize);
+ tmp = RET_MSG_LENGTH - 1 - (int)strlen(ret.retMsg);
+ leftSize = tmp >= 0 ? tmp : 0;
+ strncat(ret.retMsg, item->string, leftSize);
+ }
+ }
+ }
+ cJSON_Delete(root);
+ setConfFlag = true;
+ return ret;
+}
+
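+// illustrative call (the option name below is an assumption; any client-side
+// option accepted by taosReadConfigOption works):
+//   setConfRet r = taos_set_config("{\"debugFlag\":\"135\"}");
+//   if (r.retCode != SET_CONF_RET_SUCC) printf("set config failed: %s\n", r.retMsg);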
+setConfRet taos_set_config(const char *config){
+ pthread_mutex_lock(&setConfMutex);
+ setConfRet ret = taos_set_config_imp(config);
+ pthread_mutex_unlock(&setConfMutex);
+ return ret;
+}
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 19a816faeb628e2796a184db2b049a0019e7918c..60a6e241ccdfd48c7eb5a68f2dd7a251f76097a5 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -29,6 +29,7 @@
#include "tsclient.h"
#include "ttimer.h"
#include "ttokendef.h"
+#include "httpInt.h"
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
@@ -268,7 +269,10 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
functionId != TSDB_FUNC_DIFF &&
functionId != TSDB_FUNC_DERIVATIVE &&
functionId != TSDB_FUNC_TS_DUMMY &&
- functionId != TSDB_FUNC_TID_TAG) {
+ functionId != TSDB_FUNC_TID_TAG &&
+ functionId != TSDB_FUNC_CEIL &&
+ functionId != TSDB_FUNC_FLOOR &&
+ functionId != TSDB_FUNC_ROUND) {
return false;
}
}
@@ -403,6 +407,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TS) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@@ -659,8 +684,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
- pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
-
+ char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+      if (buffer == NULL) {
+        return;
+      }
+ pRes->buffer[i] = buffer;
// string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
@@ -787,7 +814,7 @@ typedef struct SDummyInputInfo {
SSDataBlock *block;
STableQueryInfo *pTableQueryInfo;
SSqlObj *pSql; // refactor: remove it
- SFilterInfo *pFilterInfo;
+ void *pFilterInfo;
} SDummyInputInfo;
typedef struct SJoinStatus {
@@ -803,7 +830,7 @@ typedef struct SJoinOperatorInfo {
SRspResultInfo resultInfo; // todo refactor, add this info for each operator
} SJoinOperatorInfo;
-static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* pFilterInfo) {
+static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, void* pFilterInfo) {
int32_t offset = 0;
char* pData = pRes->data;
@@ -820,18 +847,24 @@ static void doSetupSDataBlock(SSqlRes* pRes, SSDataBlock* pBlock, SFilterInfo* p
// filter data if needed
if (pFilterInfo) {
- //doSetFilterColumnInfo(pFilterInfo, numOfFilterCols, pBlock);
- doSetFilterColInfo(pFilterInfo, pBlock);
+ SColumnDataParam param = {.numOfCols = pBlock->info.numOfCols, .pDataBlock = pBlock->pDataBlock};
+      filterSetColFieldData(pFilterInfo, &param, getColumnDataFromId);
+
bool gotNchar = false;
filterConverNcharColumns(pFilterInfo, pBlock->info.rows, &gotNchar);
- int8_t* p = calloc(pBlock->info.rows, sizeof(int8_t));
+ int8_t* p = NULL;
//bool all = doFilterDataBlock(pFilterInfo, numOfFilterCols, pBlock->info.rows, p);
- bool all = filterExecute(pFilterInfo, pBlock->info.rows, p);
+ bool all = filterExecute(pFilterInfo, pBlock->info.rows, &p, NULL, 0);
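+    // filterExecute now allocates the selection vector p itself; a NULL p
+    // with all == false means no row passed the filter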
if (gotNchar) {
filterFreeNcharColumns(pFilterInfo);
}
if (!all) {
- doCompactSDataBlock(pBlock, pBlock->info.rows, p);
+ if (p) {
+ doCompactSDataBlock(pBlock, pBlock->info.rows, p);
+ } else {
+ pBlock->info.rows = 0;
+ pBlock->pBlockStatis = NULL; // clean the block statistics info
+ }
}
tfree(p);
@@ -1079,7 +1112,7 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
}
// todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later
-SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, SFilterInfo* pFilters) {
+SOperatorInfo* createDummyInputOperator(SSqlObj* pSql, SSchema* pSchema, int32_t numOfCols, void* pFilters) {
assert(numOfCols > 0);
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
@@ -1221,14 +1254,13 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
// if it is a join query, create join operator here
int32_t numOfCol1 = pTableMeta->tableInfo.numOfColumns;
- SFilterInfo *pFilters = NULL;
+ void *pFilters = NULL;
STblCond *pCond = NULL;
if (px->colCond) {
pCond = tsGetTableFilter(px->colCond, pTableMeta->id.uid, 0);
if (pCond && pCond->cond) {
createQueryFilter(pCond->cond, pCond->len, &pFilters);
}
- //createInputDataFlterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo);
}
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
@@ -1249,7 +1281,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
for(int32_t i = 1; i < px->numOfTables; ++i) {
STableMeta* pTableMeta1 = tscGetMetaInfo(px, i)->pTableMeta;
numOfCol1 = pTableMeta1->tableInfo.numOfColumns;
- SFilterInfo *pFilters1 = NULL;
+ void *pFilters1 = NULL;
SSchema* pSchema1 = tscGetTableSchema(pTableMeta1);
int32_t n = pTableMeta1->tableInfo.numOfColumns;
@@ -1319,25 +1351,19 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
tfree(pRes->buffer);
tfree(pRes->urow);
- tfree(pRes->pGroupRec);
tfree(pRes->pColumnIndex);
-
- if (pRes->pArithSup != NULL) {
- tfree(pRes->pArithSup->data);
- tfree(pRes->pArithSup);
- }
-
tfree(pRes->final);
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
-void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
+void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeCachedMeta, uint64_t id) {
if (pCmd == NULL) {
return;
}
SQueryInfo* pQueryInfo = pCmd->pQueryInfo;
+
while(pQueryInfo != NULL) {
SQueryInfo* p = pQueryInfo->sibling;
@@ -1346,7 +1372,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
SQueryInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i);
freeQueryInfoImpl(pUpQueryInfo);
- clearAllTableMetaInfo(pUpQueryInfo, removeMeta);
+ clearAllTableMetaInfo(pUpQueryInfo, removeCachedMeta, id);
if (pUpQueryInfo->pQInfo != NULL) {
qDestroyQueryInfo(pUpQueryInfo->pQInfo);
pUpQueryInfo->pQInfo = NULL;
@@ -1362,7 +1388,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeMeta) {
}
freeQueryInfoImpl(pQueryInfo);
- clearAllTableMetaInfo(pQueryInfo, removeMeta);
+ clearAllTableMetaInfo(pQueryInfo, removeCachedMeta, id);
if (pQueryInfo->pQInfo != NULL) {
qDestroyQueryInfo(pQueryInfo->pQInfo);
@@ -1391,7 +1417,7 @@ void destroyTableNameList(SInsertStatementParam* pInsertParam) {
tfree(pInsertParam->pTableNameList);
}
-void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
+void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta, uint64_t id) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@@ -1405,19 +1431,8 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) {
tfree(pCmd->insertParam.tagData.data);
pCmd->insertParam.tagData.dataLen = 0;
- tscFreeQueryInfo(pCmd, clearCachedMeta);
-
- if (pCmd->pTableMetaMap != NULL) {
- STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL);
- while (p) {
- taosArrayDestroy(p->vgroupIdList);
- tfree(p->pTableMeta);
- p = taosHashIterate(pCmd->pTableMetaMap, p);
- }
-
- taosHashCleanup(pCmd->pTableMetaMap);
- pCmd->pTableMetaMap = NULL;
- }
+ tscFreeQueryInfo(pCmd, clearCachedMeta, id);
+ pCmd->pTableMetaMap = tscCleanupTableMetaMap(pCmd->pTableMetaMap);
}
void* tscCleanupTableMetaMap(SHashObj* pTableMetaMap) {
@@ -1454,7 +1469,12 @@ void tscFreeSubobj(SSqlObj* pSql) {
tscDebug("0x%"PRIx64" start to free sub SqlObj, numOfSub:%d", pSql->self, pSql->subState.numOfSub);
for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
- tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
+ if (pSql->pSubs[i] != NULL) {
+ tscDebug("0x%"PRIx64" free sub SqlObj:0x%"PRIx64", index:%d", pSql->self, pSql->pSubs[i]->self, i);
+ } else {
+      /* the subquery slot may be empty, e.g. in the python error test case */
+ tscDebug("0x%"PRIx64" free sub SqlObj:0x0, index:%d", pSql->self, i);
+ }
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
@@ -1513,8 +1533,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscFreeMetaSqlObj(&pSql->metaRid);
tscFreeMetaSqlObj(&pSql->svgroupRid);
- tscFreeSubobj(pSql);
-
SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_GLOBALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
@@ -1522,6 +1540,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscRemoveFromSqlList(pSql);
}
+ tscFreeSubobj(pSql);
+
pSql->signature = NULL;
pSql->fp = NULL;
tfree(pSql->sqlstr);
@@ -1532,9 +1552,8 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
- tscResetSqlCmd(pCmd, false);
+ tscResetSqlCmd(pCmd, false, pSql->self);
- memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
tfree(pCmd->payload);
pCmd->allocSize = 0;
@@ -2070,32 +2089,35 @@ bool tscIsInsertData(char* sqlstr) {
} while (1);
}
-int tscAllocPayload(SSqlCmd* pCmd, int size) {
+int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) {
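+  // fast variant: the buffer is not zero-filled here; tscAllocPayload below
+  // wraps this call and memsets the payload when a cleared buffer is required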
if (pCmd->payload == NULL) {
assert(pCmd->allocSize == 0);
- pCmd->payload = (char*)calloc(1, size);
- if (pCmd->payload == NULL) {
+    pCmd->payload = malloc(size);
+    if (pCmd->payload == NULL) {   // keep the out-of-memory check the old calloc path had
+      return TSDB_CODE_TSC_OUT_OF_MEMORY;
+    }
+    pCmd->allocSize = (uint32_t) size;
+ } else if (pCmd->allocSize < size) {
+ char* tmp = realloc(pCmd->payload, size);
+ if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- pCmd->allocSize = size;
- } else {
- if (pCmd->allocSize < (uint32_t)size) {
- char* b = realloc(pCmd->payload, size);
- if (b == NULL) {
- return TSDB_CODE_TSC_OUT_OF_MEMORY;
- }
+ pCmd->payload = tmp;
+ pCmd->allocSize = (uint32_t) size;
+ }
- pCmd->payload = b;
- pCmd->allocSize = size;
- }
+ assert(pCmd->allocSize >= size);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t tscAllocPayload(SSqlCmd* pCmd, int size) {
+ assert(size > 0);
+ int32_t code = tscAllocPayloadFast(pCmd, (size_t) size);
+ if (code == TSDB_CODE_SUCCESS) {
memset(pCmd->payload, 0, pCmd->allocSize);
}
- assert(pCmd->allocSize >= (uint32_t)size && size > 0);
- return TSDB_CODE_SUCCESS;
+ return code;
}
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
@@ -2104,6 +2126,22 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
return f;
}
+int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo) {
+ if (pQueryInfo->fieldsInfo.numOfOutput <= 0 || pQueryInfo->fieldsInfo.internalField == NULL) {
+ return 0;
+ }
+
+ for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
+ SInternalField* pField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
+ if (!pField->visible) {
+ return i;
+ }
+ }
+
+ return pQueryInfo->fieldsInfo.numOfOutput;
+}
+
+
SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
assert(pFieldInfo != NULL);
pFieldInfo->numOfOutput++;
@@ -2427,6 +2465,19 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}
+int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+ for(int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL)
+ continue;
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return i;
+ }
+ }
+ return -1;
+}
+
// todo REFACTOR
void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0);
@@ -2856,16 +2907,6 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
memset(dest, 0, sizeof(STagCond));
- if (src->tbnameCond.cond != NULL) {
- dest->tbnameCond.cond = strdup(src->tbnameCond.cond);
- if (dest->tbnameCond.cond == NULL) {
- return -1;
- }
- }
-
- dest->tbnameCond.uid = src->tbnameCond.uid;
- dest->tbnameCond.len = src->tbnameCond.len;
-
dest->joinInfo.hasJoin = src->joinInfo.hasJoin;
for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) {
@@ -2884,9 +2925,6 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
}
}
-
- dest->relType = src->relType;
-
if (src->pCond == NULL) {
return 0;
}
@@ -2976,8 +3014,6 @@ void tscColCondRelease(SArray** pCond) {
void tscTagCondRelease(STagCond* pTagCond) {
- free(pTagCond->tbnameCond.cond);
-
if (pTagCond->pCond != NULL) {
size_t s = taosArrayGetSize(pTagCond->pCond);
for (int32_t i = 0; i < s; ++i) {
@@ -3136,6 +3172,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->slimit.offset = 0;
pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->window = TSWINDOW_INITIALIZER;
+ pQueryInfo->multigroupResult = true;
}
int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
@@ -3147,7 +3184,6 @@ int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
}
tscInitQueryInfo(pQueryInfo);
-
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
if (pCmd->pQueryInfo == NULL) {
@@ -3196,6 +3232,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
taosArrayDestroy(pQueryInfo->pUpstream);
pQueryInfo->pUpstream = NULL;
+ pQueryInfo->bufLen = 0;
}
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
@@ -3230,6 +3267,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->window = pSrc->window;
pQueryInfo->sessionWindow = pSrc->sessionWindow;
pQueryInfo->pTableMetaInfo = NULL;
+ pQueryInfo->multigroupResult = pSrc->multigroupResult;
pQueryInfo->bufLen = pSrc->bufLen;
pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
@@ -3321,11 +3359,11 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) {
size_t num = taosArrayGetSize(pVgroupTables);
for (size_t i = 0; i < num; i++) {
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i);
-
+#if 0
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
tfree(pInfo->vgInfo.epAddr[j].fqdn);
}
-
+#endif
taosArrayDestroy(pInfo->itemList);
}
@@ -3339,9 +3377,9 @@ void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) {
assert(size > index);
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index);
- for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
- tfree(pInfo->vgInfo.epAddr[j].fqdn);
- }
+// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
+// tfree(pInfo->vgInfo.epAddr[j].fqdn);
+// }
taosArrayDestroy(pInfo->itemList);
taosArrayRemove(pVgroupTable, index);
@@ -3351,9 +3389,12 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
memset(info, 0, sizeof(SVgroupTableInfo));
info->vgInfo = pInfo->vgInfo;
+
+#if 0
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
+#endif
if (pInfo->itemList) {
info->itemList = taosArrayDup(pInfo->itemList);
@@ -3379,20 +3420,15 @@ SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
return pa;
}
-void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) {
+void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta, uint64_t id) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
-
if (removeMeta) {
- char name[TSDB_TABLE_FNAME_LEN] = {0};
- tNameExtractFullName(&pTableMetaInfo->name, name);
- taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ tscRemoveCachedTableMeta(pTableMetaInfo, id);
}
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
tscClearTableMetaInfo(pTableMetaInfo);
-
- free(pTableMetaInfo);
}
tfree(pQueryInfo->pTableMetaInfo);
@@ -3421,13 +3457,9 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
}
pTableMetaInfo->pTableMeta = pTableMeta;
- if (pTableMetaInfo->pTableMeta == NULL) {
- pTableMetaInfo->tableMetaSize = 0;
- } else {
- pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta);
- }
+ pTableMetaInfo->tableMetaSize = (pTableMetaInfo->pTableMeta == NULL)? 0:tscGetTableMetaSize(pTableMeta);
+
pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize);
-
if (vgroupList != NULL) {
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
@@ -3459,10 +3491,12 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo) {
}
tfree(pTableMetaInfo->pTableMeta);
-
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
+
tscColumnListDestroy(pTableMetaInfo->tagColList);
pTableMetaInfo->tagColList = NULL;
+
+ free(pTableMetaInfo);
}
void tscResetForNextRetrieve(SSqlRes* pRes) {
@@ -3618,23 +3652,25 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pnCmd->active = pNewQueryInfo;
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
- pNewQueryInfo->type = pQueryInfo->type;
- pNewQueryInfo->window = pQueryInfo->window;
- pNewQueryInfo->limit = pQueryInfo->limit;
- pNewQueryInfo->slimit = pQueryInfo->slimit;
- pNewQueryInfo->order = pQueryInfo->order;
- pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit;
- pNewQueryInfo->tsBuf = NULL;
- pNewQueryInfo->fillType = pQueryInfo->fillType;
- pNewQueryInfo->fillVal = NULL;
+ pNewQueryInfo->type = pQueryInfo->type;
+ pNewQueryInfo->window = pQueryInfo->window;
+ pNewQueryInfo->limit = pQueryInfo->limit;
+ pNewQueryInfo->slimit = pQueryInfo->slimit;
+ pNewQueryInfo->order = pQueryInfo->order;
+ pNewQueryInfo->tsBuf = NULL;
+ pNewQueryInfo->fillType = pQueryInfo->fillType;
+ pNewQueryInfo->fillVal = NULL;
+ pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
+ pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfFillVal = 0;
- pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
- pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
- pNewQueryInfo->numOfTables = 0;
+ pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
- pNewQueryInfo->bufLen = pQueryInfo->bufLen;
+ pNewQueryInfo->bufLen = pQueryInfo->bufLen;
+ pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit;
+ pNewQueryInfo->distinct = pQueryInfo->distinct;
+ pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult;
- pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
+ pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@@ -3671,8 +3707,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
- pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput;
+ pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput;
memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
}
@@ -3713,7 +3749,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
-
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
@@ -3723,8 +3758,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
- pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
- pTableMetaInfo->pVgroupTables);
+ pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo,
+ pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
}
// this case cannot be happened
@@ -3850,8 +3885,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
int32_t index = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index);
- tfree(ps);
- pSql->param = NULL;
+ tscFreeRetrieveSup(pSql);
if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
@@ -3860,13 +3894,15 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
// todo refactor
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
+ if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
+ pParentSql->res.code = code;
- SSqlCmd* pParentCmd = &pParentSql->cmd;
- STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0);
- tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self);
+ tscAsyncResultOnError(pParentSql);
+ return;
+ }
- pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap);
- pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ tscFreeSubobj(pParentSql);
+ tfree(pParentSql->pSubs);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
@@ -3874,6 +3910,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
+
+ tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
+
code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
@@ -3885,7 +3924,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
return;
}
- SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd);
+ SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
executeQuery(pParentSql, pQueryInfo);
return;
}
@@ -3893,6 +3932,21 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
taos_fetch_rows_a(tres, tscSubqueryRetrieveCallback, param);
}
+int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries) {
+ assert(pSql->subState.numOfSub == 0 && pSql->pSubs == NULL && pSql->subState.states == NULL);
+ pSql->subState.numOfSub = numOfSubqueries;
+
+ pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
+ pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
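+  // one int8_t completion flag per subquery, updated under subState.mutex as
+  // the subqueries report back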
+
+ int32_t code = pthread_mutex_init(&pSql->subState.mutex, NULL);
+ if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != 0) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
// do execute the query according to the query execution plan
void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -3908,14 +3962,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
}
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
- pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
-
- pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
- pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
- code = pthread_mutex_init(&pSql->subState.mutex, NULL);
-
- if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != TSDB_CODE_SUCCESS) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream));
+ if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3935,7 +3983,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->signature = pNew;
pNew->sqlstr = strdup(pSql->sqlstr);
pNew->fp = tscSubqueryCompleteCallback;
+ pNew->fetchFp = tscSubqueryCompleteCallback;
pNew->maxRetry = pSql->maxRetry;
+
+ pNew->cmd.resColumnId = TSDB_RES_COL_ID;
+
tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
@@ -4112,6 +4164,31 @@ int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql) {
+ const char* msgFormat1 = "%s:%s";
+ const char* msgFormat2 = "%s:\'%s\' (%s)";
+ const char* msgFormat3 = "%s:\'%s\'";
+
+ const int32_t BACKWARD_CHAR_STEP = 0;
+
+ if (sql == NULL) {
+ assert(errMsg != NULL);
+ sprintf(dstBuffer, msgFormat1, tstrerror(code), errMsg);
+ return code;
+ }
+
+ char buf[64] = {0}; // only extract part of sql string
+ strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);
+
+ if (errMsg != NULL) {
+ sprintf(dstBuffer, msgFormat2, tstrerror(code), buf, errMsg);
+ } else {
+ sprintf(dstBuffer, msgFormat3, tstrerror(code), buf); // no additional information for invalid sql error
+ }
+
+ return code;
+}
+
bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
assert(pQueryInfo != NULL && pQueryInfo->clauseLimit != 0);
return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
@@ -4233,7 +4310,9 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
}
tfree(pSql->pSubs);
+ tfree(pSql->subState.states);
pSql->subState.numOfSub = 0;
+ pthread_mutex_destroy(&pSql->subState.mutex);
pSql->fp = fp;
@@ -4324,8 +4403,8 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) {
return NULL;
}
- size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups;
- SVgroupsInfo* pNew = calloc(1, size);
+ size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupMsg) * vgroupList->numOfVgroups;
+ SVgroupsInfo* pNew = malloc(size);
if (pNew == NULL) {
return NULL;
}
@@ -4333,15 +4412,15 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) {
pNew->numOfVgroups = vgroupList->numOfVgroups;
for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) {
- SVgroupInfo* pNewVInfo = &pNew->vgroups[i];
+ SVgroupMsg* pNewVInfo = &pNew->vgroups[i];
- SVgroupInfo* pvInfo = &vgroupList->vgroups[i];
+ SVgroupMsg* pvInfo = &vgroupList->vgroups[i];
pNewVInfo->vgId = pvInfo->vgId;
pNewVInfo->numOfEps = pvInfo->numOfEps;
for(int32_t j = 0; j < pvInfo->numOfEps; ++j) {
- pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn);
pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port;
+ tstrncpy(pNewVInfo->epAddr[j].fqdn, pvInfo->epAddr[j].fqdn, TSDB_FQDN_LEN);
}
}
@@ -4353,8 +4432,9 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) {
return NULL;
}
+#if 0
for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) {
- SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i];
+ SVgroupMsg* pVgroupInfo = &vgroupList->vgroups[i];
for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) {
tfree(pVgroupInfo->epAddr[j].fqdn);
@@ -4365,10 +4445,11 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) {
}
}
+#endif
tfree(vgroupList);
return NULL;
}
-
+#if 0
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) {
dst->vgId = src->vgId;
dst->numOfEps = src->numOfEps;
@@ -4381,6 +4462,8 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) {
}
}
+#endif
+
char* serializeTagData(STagData* pTagData, char* pMsg) {
int32_t n = (int32_t) strlen(pTagData->name);
*(int32_t*) pMsg = htonl(n);
@@ -4468,21 +4551,27 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
assert(*ppChild != NULL);
STableMeta* p = *ppSTable;
STableMeta* pChild = *ppChild;
- size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
+
+ size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; // ppSTableBuf's actual capacity may be larger than sz; that's fine
if (p != NULL && sz != 0) {
memset((char *)p, 0, sz);
}
- taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
- *ppSTable = p;
+
+ if (NULL == taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz)) {
+ tfree(p);
+ } else {
+ *ppSTable = p;
+ }
// tableMeta exists, build child table meta according to the super table meta
// the uid need to be checked in addition to the general name of the super table.
if (p && p->id.uid > 0 && pChild->suid == p->id.uid) {
-
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
- pChild = realloc(pChild, tableMetaSize);
+ STableMeta* pChild1 = realloc(pChild, tableMetaSize);
+ if (pChild1 == NULL) return -1;
+ pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize;
}
@@ -4515,11 +4604,12 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) {
assert(pVgroupsInfo != NULL);
- size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo);
+ size_t size = sizeof(SVgroupMsg) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo);
SVgroupsInfo* pInfo = calloc(1, size);
pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups;
for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) {
- tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]);
+ memcpy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m], sizeof(SVgroupMsg));
+// tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]);
}
return pInfo;
}
@@ -4752,6 +4842,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->distinct = pQueryInfo->distinct;
pQueryAttr->sw = pQueryInfo->sessionWindow;
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
+ pQueryAttr->multigroupResult = pQueryInfo->multigroupResult;
pQueryAttr->numOfCols = numOfCols;
pQueryAttr->numOfOutput = numOfOutput;
@@ -5009,7 +5100,7 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
return info;
}
-void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
+void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id) {
char fname[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, fname);
@@ -5024,3 +5115,31 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) {
taosHashRemove(tscTableMetaMap, fname, len);
tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
}
+
+char* cloneCurrentDBName(SSqlObj* pSql) {
+ char *p = NULL;
+ HttpContext *pCtx = NULL;
+
+ pthread_mutex_lock(&pSql->pTscObj->mutex);
+ STscObj *pTscObj = pSql->pTscObj;
+ switch (pTscObj->from) {
+ case TAOS_REQ_FROM_HTTP:
+ pCtx = pSql->param;
+ if (pCtx && pCtx->db[0] != '\0') {
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
+ int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
+ assert(len <= sizeof(db));
+
+ p = strdup(db);
+ }
+ break;
+ default:
+ break;
+ }
+ if (p == NULL) {
+ p = strdup(pSql->pTscObj->db);
+ }
+ pthread_mutex_unlock(&pSql->pTscObj->mutex);
+
+ return p;
+}
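Reviewer note: the hunks above pair every allocation made in doInitSubState() with a matching release added to tscTryQueryNextClause() (tfree on subState.states plus pthread_mutex_destroy). A minimal standalone sketch of that lifecycle, using illustrative names rather than TDengine's real structs:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
  int32_t          numOfSub;
  void           **subs;    /* stands in for pSql->pSubs */
  int8_t          *states;  /* stands in for subState.states */
  pthread_mutex_t  mutex;
} SubStateSketch;

/* mirrors doInitSubState(): allocate both arrays, init the mutex,
 * and report out-of-memory if any of the three steps failed */
static int initSubState(SubStateSketch *s, int32_t n) {
  s->numOfSub = n;
  s->subs     = calloc(n, sizeof(void *));
  s->states   = calloc(n, sizeof(int8_t));
  int rc = pthread_mutex_init(&s->mutex, NULL);
  if (s->subs == NULL || s->states == NULL || rc != 0) return -1;
  return 0;
}

/* mirrors the teardown added to tscTryQueryNextClause(): free both
 * arrays and destroy the mutex on the same path */
static void clearSubState(SubStateSketch *s) {
  free(s->subs);   s->subs = NULL;
  free(s->states); s->states = NULL;
  s->numOfSub = 0;
  pthread_mutex_destroy(&s->mutex);
}

int main(void) {
  SubStateSketch s = {0};
  if (initSubState(&s, 4) == 0) clearSubState(&s);
  return 0;
}
```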
diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt
index 24bfb44ac90e11e01ba99423aa68bd5a9511f746..5de18942acbb5b3ac59d2496728c500b63246fe9 100644
--- a/src/client/tests/CMakeLists.txt
+++ b/src/client/tests/CMakeLists.txt
@@ -17,5 +17,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(cliTest ${SOURCE_LIST})
- TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread)
+ TARGET_LINK_LIBRARIES(cliTest taos cJson tutil common gtest pthread)
ENDIF()
diff --git a/src/client/tests/setConfigTest.cpp b/src/client/tests/setConfigTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fb016715f6ad2f5311aa2d81b608c2043f86c4f0
--- /dev/null
+++ b/src/client/tests/setConfigTest.cpp
@@ -0,0 +1,71 @@
+#include <gtest/gtest.h>
+#include <iostream>
+
+#include "taos.h"
+#include "tglobal.h"
+#include "tconfig.h"
+
+/* tests for the taos_set_config() function */
+TEST(testCase, set_config_test1) {
+ const char *config = "{\"debugFlag\":\"131\"}";
+ setConfRet ret = taos_set_config(config);
+ ASSERT_EQ(ret.retCode, 0);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+
+ const char *config2 = "{\"debugFlag\":\"199\"}";
+ ret = taos_set_config(config2); // does not take effect
+ ASSERT_EQ(ret.retCode, -5);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+
+ bool readResult = taosReadGlobalCfg(); // load file config; debugFlag does not take effect
+ ASSERT_TRUE(readResult);
+ int32_t checkResult = taosCheckGlobalCfg();
+ ASSERT_EQ(checkResult, 0);
+
+ SGlobalCfg *cfg = taosGetConfigOption("debugFlag");
+ ASSERT_EQ(cfg->cfgStatus, TAOS_CFG_CSTATUS_OPTION);
+ int32_t result = *(int32_t *)cfg->ptr;
+ ASSERT_EQ(result, 131);
+}
+
+TEST(testCase, set_config_test2) {
+ const char *config = "{\"numOfCommitThreads\":\"10\"}";
+ taos_set_config(config);
+
+ bool readResult = taosReadGlobalCfg(); // load file config; debugFlag does not take effect
+ ASSERT_TRUE(readResult);
+ int32_t checkResult = taosCheckGlobalCfg();
+ ASSERT_EQ(checkResult, 0);
+
+ SGlobalCfg *cfg = taosGetConfigOption("numOfCommitThreads");
+ int32_t result = *(int32_t*)cfg->ptr;
+ ASSERT_NE(result, 10); // numOfCommitThreads is not of type TSDB_CFG_CTYPE_B_CLIENT
+}
+
+TEST(testCase, set_config_test3) {
+ const char *config = "{\"numOfCoitThreads\":\"10\", \"esdfa\":\"10\"}";
+ setConfRet ret = taos_set_config(config);
+ ASSERT_EQ(ret.retCode, -1);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+}
+
+TEST(testCase, set_config_test4) {
+ const char *config = "{null}";
+ setConfRet ret = taos_set_config(config);
+ ASSERT_EQ(ret.retCode, -4);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+}
+
+TEST(testCase, set_config_test5) {
+ const char *config = "\"ddd\"";
+ setConfRet ret = taos_set_config(config);
+ ASSERT_EQ(ret.retCode, -3);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+}
+
+TEST(testCase, set_config_test6) {
+ const char *config = "{\"numOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitThreadsnumOfCoitT3333dd\":\"10\", \"esdfa\":\"10\"}";
+ setConfRet ret = taos_set_config(config);
+ ASSERT_EQ(ret.retCode, -1);
+ printf("msg:%d->%s", ret.retCode, ret.retMsg);
+}
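Editor note: for readers unfamiliar with the API exercised by this new test file, here is a minimal sketch of how an application might call taos_set_config. The JSON key/value map must be applied before the client connects, and retCode/retMsg are the same fields the assertions above check:

```c
#include <stdio.h>
#include "taos.h"  /* taos_set_config(), setConfRet */

int main(void) {
  /* apply client-side options before the first connection is opened */
  setConfRet ret = taos_set_config("{\"debugFlag\":\"131\"}");
  if (ret.retCode != 0) {
    fprintf(stderr, "taos_set_config failed: %d (%s)\n", ret.retCode, ret.retMsg);
    return 1;
  }
  /* ... taos_connect() and normal client work would follow here ... */
  return 0;
}
```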
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 46259c8488617b77a940736d073454d4349a7774..a01c3775397e25849d9e8ff70409db7ac0af90ba 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
- if (pBuilder->pColIdx == NULL) return -1;
+ SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
+ if (pColIdx == NULL) return -1;
+ pBuilder->pColIdx = pColIdx;
}
pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2;
}
- pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
- if (pBuilder->buf == NULL) return -1;
+ void* buf = realloc(pBuilder->buf, pBuilder->alloc);
+ if (buf == NULL) return -1;
+ pBuilder->buf = buf;
}
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);
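Editor note: both hunks above fix the same realloc anti-pattern, where assigning realloc's result straight back to the only pointer leaks the original block on failure. The generalized idiom, as a standalone sketch:

```c
#include <stdlib.h>

/* Grow *buf to at least `want` bytes. On failure the old block is
 * untouched and still owned by the caller -- the key point of the
 * tdAddColToKVRow() fixes above. */
static int growBuffer(void **buf, size_t *cap, size_t want) {
  if (want <= *cap) return 0;
  size_t newCap = (*cap == 0) ? 16 : *cap;
  while (newCap < want) newCap *= 2;   /* same doubling policy as the patch */
  void *p = realloc(*buf, newCap);     /* assign to a temporary first */
  if (p == NULL) return -1;
  *buf = p;
  *cap = newCap;
  return 0;
}
```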
diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h
index 2e49a69366c2277c98ec32a1d8419c141ddecc0f..bfeb3a6dfeee22f793c82748611c28ec537e8825 100644
--- a/src/common/inc/texpr.h
+++ b/src/common/inc/texpr.h
@@ -33,9 +33,13 @@ struct SSchema;
#define QUERY_COND_REL_PREFIX_IN "IN|"
#define QUERY_COND_REL_PREFIX_LIKE "LIKE|"
+#define QUERY_COND_REL_PREFIX_MATCH "MATCH|"
+#define QUERY_COND_REL_PREFIX_NMATCH "NMATCH|"
#define QUERY_COND_REL_PREFIX_IN_LEN 3
#define QUERY_COND_REL_PREFIX_LIKE_LEN 5
+#define QUERY_COND_REL_PREFIX_MATCH_LEN 6
+#define QUERY_COND_REL_PREFIX_NMATCH_LEN 7
typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *);
@@ -84,7 +88,6 @@ void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
tExprNode* exprTreeFromBinary(const void* data, size_t size);
-tExprNode* exprTreeFromTableName(const char* tbnameCond);
tExprNode* exprdup(tExprNode* pTree);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
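Editor note: judging from how the existing LIKE|/IN| markers are consumed, a serialized tag condition carries one of these prefixes in front of its payload and the matching *_LEN constant skips it on the way out. An illustrative round-trip for the new MATCH marker (a sketch, not TDengine code):

```c
#include <stdio.h>
#include <string.h>

#define QUERY_COND_REL_PREFIX_MATCH     "MATCH|"
#define QUERY_COND_REL_PREFIX_MATCH_LEN 6

int main(void) {
  char cond[64];
  /* build: marker + regex payload */
  snprintf(cond, sizeof(cond), "%s%s", QUERY_COND_REL_PREFIX_MATCH, "^dev[0-9]+$");
  /* consume: recognize the marker, then skip its fixed length */
  if (strncmp(cond, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN) == 0) {
    printf("regex payload: %s\n", cond + QUERY_COND_REL_PREFIX_MATCH_LEN);
  }
  return 0;
}
```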
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 62f369d98777fa79bf40300d178f5f5eeb04a2a4..604ce89432bcf662b319fb2ec11f55026450a2be 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -60,6 +60,8 @@ extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
+extern int32_t tsCompressColData;
+extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
//query buffer management
@@ -72,6 +74,7 @@ extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsMaxSQLStringLen;
extern int32_t tsMaxWildCardsLen;
+extern int32_t tsMaxRegexStringLen;
extern int8_t tsTscEnableRecordSql;
extern int32_t tsMaxNumOfOrderedResults;
extern int32_t tsMinSlidingTime;
@@ -105,6 +108,9 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate;
extern int8_t tsCacheLastRow;
+//tsdb
+extern bool tsdbForceKeepFile;
+
// balance
extern int8_t tsEnableBalance;
extern int8_t tsAlternativeRole;
@@ -125,6 +131,7 @@ extern int32_t tsHttpMaxThreads;
extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum;
+extern int8_t tsHttpDbNameMandatory;
// mqtt
extern int8_t tsEnableMqttModule;
@@ -158,6 +165,7 @@ extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
extern int64_t tsTickPerDay[3];
+extern int32_t tsTopicBianryLen;
// system info
extern char tsOsName[];
@@ -216,6 +224,8 @@ extern uint32_t maxRange;
extern uint32_t curRange;
extern char Compressor[];
#endif
+// long query
+extern int8_t tsDeadLockKillQuery;
typedef struct {
char dir[TSDB_FILENAME_LEN];
diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c
index 3779303e1a41275996c52f828d433d2d68805fdf..000ef79fcf9b5ee9e52dae65b99f719cec6a8059 100644
--- a/src/common/src/tarithoperator.c
+++ b/src/common/src/tarithoperator.c
@@ -21,187 +21,6 @@
#include "tcompare.h"
//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
-#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
- { \
- int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
- int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \
- \
- if ((len1) == (len2)) { \
- for (; i < (len2) && i >= 0; i += step, (out) += 1) { \
- if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] op(right)[i]; \
- } \
- } else if ((len1) == 1) { \
- for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
- if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[0] op(right)[i]; \
- } \
- } else if ((len2) == 1) { \
- for (; i >= 0 && i < (len1); i += step, (out) += 1) { \
- if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] op(right)[0]; \
- } \
- } \
- }
-#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
- { \
- int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
- int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \
- \
- if ((len1) == (len2)) { \
- for (; i < (len2) && i >= 0; i += step, (out) += 1) { \
- if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] op(right)[i]; \
- } \
- } else if ((len1) == 1) { \
- for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
- if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[0] op(right)[i]; \
- } \
- } else if ((len2) == 1) { \
- for (; i >= 0 && i < (len1); i += step, (out) += 1) { \
- if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] op(right)[0]; \
- } \
- } \
- }
-
-#define ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
- { \
- int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \
- int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1; \
- \
- if (len1 == (len2)) { \
- for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
- if (isNull((char *)&(left[i]), _left_type) || isNull((char *)&(right[i]), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \
- } \
- } else if (len1 == 1) { \
- for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
- if (isNull((char *)(left), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \
- } \
- } else if ((len2) == 1) { \
- for (; i >= 0 && i < len1; i += step, (out) += 1) { \
- if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)(right), _right_type)) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- double v, z = 0.0; \
- GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \
- if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \
- SET_DOUBLE_NULL(out); \
- continue; \
- } \
- *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \
- } \
- } \
- }
-
-#define ARRAY_LIST_ADD(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, +, TSDB_DATA_TYPE_DOUBLE, _ord)
-#define ARRAY_LIST_SUB(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, -, TSDB_DATA_TYPE_DOUBLE, _ord)
-#define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord)
-#define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord)
-#define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \
- ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord)
-
-#define TYPE_CONVERT_DOUBLE_RES(left, right, out, _type_left, _type_right, _type_res) \
- _type_left * pLeft = (_type_left *)(left); \
- _type_right *pRight = (_type_right *)(right); \
- _type_res * pOutput = (_type_res *)(out);
-
-#define DO_VECTOR_ADD(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, _output, \
- _order) \
- do { \
- TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \
- ARRAY_LIST_ADD(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \
- } while (0)
-
-#define DO_VECTOR_SUB(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, _output, \
- _order) \
- do { \
- TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \
- ARRAY_LIST_SUB(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \
- } while (0)
-
-#define DO_VECTOR_MULTIPLY(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \
- _output, _order) \
- do { \
- TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \
- ARRAY_LIST_MULTI(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \
- } while (0)
-
-#define DO_VECTOR_DIVIDE(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \
- _output, _order) \
- do { \
- TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \
- ARRAY_LIST_DIV(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \
- } while (0)
-
-#define DO_VECTOR_REMAINDER(left, numLeft, leftType, leftOriginType, right, numRight, rightType, rightOriginType, \
- _output, _order) \
- do { \
- TYPE_CONVERT_DOUBLE_RES(left, right, _output, leftOriginType, rightOriginType, double); \
- ARRAY_LIST_REM(pLeft, pRight, leftType, rightType, numLeft, numRight, pOutput, _order); \
- } while (0)
void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) {
int32_t *pLeft = (int32_t *)left;
@@ -240,2389 +59,338 @@ void calc_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight
}
}
-void vectorAdd(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType,
- void *output, int32_t order) {
- switch(leftType) {
- case TSDB_DATA_TYPE_TINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, float, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_ADD(left, numLeft, leftType, double, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- default:;
- }
-}
+typedef double (*_arithmetic_getVectorDoubleValue_fn_t)(void *src, int32_t index);
-void vectorSub(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType,
- void *output, int32_t order) {
- switch(leftType) {
- case TSDB_DATA_TYPE_TINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, float, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_SUB(left, numLeft, leftType, double, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- default:;
- }
+double getVectorDoubleValue_TINYINT(void *src, int32_t index) {
+ return (double)*((int8_t *)src + index);
+}
+double getVectorDoubleValue_UTINYINT(void *src, int32_t index) {
+ return (double)*((uint8_t *)src + index);
+}
+double getVectorDoubleValue_SMALLINT(void *src, int32_t index) {
+ return (double)*((int16_t *)src + index);
+}
+double getVectorDoubleValue_USMALLINT(void *src, int32_t index) {
+ return (double)*((uint16_t *)src + index);
+}
+double getVectorDoubleValue_INT(void *src, int32_t index) {
+ return (double)*((int32_t *)src + index);
+}
+double getVectorDoubleValue_UINT(void *src, int32_t index) {
+ return (double)*((uint32_t *)src + index);
+}
+double getVectorDoubleValue_BIGINT(void *src, int32_t index) {
+ return (double)*((int64_t *)src + index);
+}
+double getVectorDoubleValue_UBIGINT(void *src, int32_t index) {
+ return (double)*((uint64_t *)src + index);
+}
+double getVectorDoubleValue_FLOAT(void *src, int32_t index) {
+ return (double)*((float *)src + index);
+}
+double getVectorDoubleValue_DOUBLE(void *src, int32_t index) {
+ return (double)*((double *)src + index);
+}
+_arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFn(int32_t srcType) {
+ _arithmetic_getVectorDoubleValue_fn_t p = NULL;
+ if (srcType == TSDB_DATA_TYPE_TINYINT) {
+ p = getVectorDoubleValue_TINYINT;
+ } else if (srcType == TSDB_DATA_TYPE_UTINYINT) {
+ p = getVectorDoubleValue_UTINYINT;
+ } else if (srcType == TSDB_DATA_TYPE_SMALLINT) {
+ p = getVectorDoubleValue_SMALLINT;
+ } else if (srcType == TSDB_DATA_TYPE_USMALLINT) {
+ p = getVectorDoubleValue_USMALLINT;
+ } else if (srcType == TSDB_DATA_TYPE_INT) {
+ p = getVectorDoubleValue_INT;
+ } else if (srcType == TSDB_DATA_TYPE_UINT) {
+ p = getVectorDoubleValue_UINT;
+ } else if (srcType == TSDB_DATA_TYPE_BIGINT) {
+ p = getVectorDoubleValue_BIGINT;
+ } else if (srcType == TSDB_DATA_TYPE_UBIGINT) {
+ p = getVectorDoubleValue_UBIGINT;
+ } else if (srcType == TSDB_DATA_TYPE_FLOAT) {
+ p = getVectorDoubleValue_FLOAT;
+ } else if (srcType == TSDB_DATA_TYPE_DOUBLE) {
+ p = getVectorDoubleValue_DOUBLE;
+ } else {
+ assert(0);
+ }
+ return p;
}
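Editor note: these accessors are what let the patch delete the thousands of lines of per-type switch/macro expansion above. A hedged sketch of how an operator collapses once both operands go through getVectorDoubleValueFn (simplified: equal-length operands, ascending order, and no NULL handling, all of which the deleted ARRAY_LIST_* macros did cover):

```c
static void vectorAddSketch(void *left, int32_t leftType,
                            void *right, int32_t rightType,
                            int32_t num, double *out) {
  _arithmetic_getVectorDoubleValue_fn_t getLeft  = getVectorDoubleValueFn(leftType);
  _arithmetic_getVectorDoubleValue_fn_t getRight = getVectorDoubleValueFn(rightType);
  for (int32_t i = 0; i < num; ++i) {
    /* one loop replaces the 10x10 switch over left/right type pairs */
    out[i] = getLeft(left, i) + getRight(right, i);
  }
}
```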
-void vectorMultiply(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType,
- void *output, int32_t order) {
- switch(leftType) {
- case TSDB_DATA_TYPE_TINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, float, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_MULTIPLY(left, numLeft, leftType, double, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- default:;
- }
+
+typedef void* (*_arithmetic_getVectorValueAddr_fn_t)(void *src, int32_t index);
+
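+/* Address getters return a pointer to element `index` without dereferencing
+ * it, so callers can hand the raw slot to isNull() for NULL checking. */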
+void* getVectorValueAddr_TINYINT(void *src, int32_t index) {
+ return (void*)((int8_t *)src + index);
+}
+void* getVectorValueAddr_UTINYINT(void *src, int32_t index) {
+ return (void*)((uint8_t *)src + index);
+}
+void* getVectorValueAddr_SMALLINT(void *src, int32_t index) {
+ return (void*)((int16_t *)src + index);
+}
+void* getVectorValueAddr_USMALLINT(void *src, int32_t index) {
+ return (void*)((uint16_t *)src + index);
+}
+void* getVectorValueAddr_INT(void *src, int32_t index) {
+ return (void*)((int32_t *)src + index);
+}
+void* getVectorValueAddr_UINT(void *src, int32_t index) {
+ return (void*)((uint32_t *)src + index);
+}
+void* getVectorValueAddr_BIGINT(void *src, int32_t index) {
+ return (void*)((int64_t *)src + index);
+}
+void* getVectorValueAddr_UBIGINT(void *src, int32_t index) {
+ return (void*)((uint64_t *)src + index);
+}
+void* getVectorValueAddr_FLOAT(void *src, int32_t index) {
+ return (void*)((float *)src + index);
+}
+void* getVectorValueAddr_DOUBLE(void *src, int32_t index) {
+ return (void*)((double *)src + index);
}
-void vectorDivide(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType,
- void *output, int32_t order) {
- switch(leftType) {
- case TSDB_DATA_TYPE_TINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, float, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_DIVIDE(left, numLeft, leftType, double, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- default:;
- }
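+/* Runtime dispatch for the address getters, mirroring getVectorDoubleValueFn. */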
+_arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFn(int32_t srcType) {
+  _arithmetic_getVectorValueAddr_fn_t p = NULL;
+  if (srcType == TSDB_DATA_TYPE_TINYINT) {
+    p = getVectorValueAddr_TINYINT;
+  } else if (srcType == TSDB_DATA_TYPE_UTINYINT) {
+    p = getVectorValueAddr_UTINYINT;
+  } else if (srcType == TSDB_DATA_TYPE_SMALLINT) {
+    p = getVectorValueAddr_SMALLINT;
+  } else if (srcType == TSDB_DATA_TYPE_USMALLINT) {
+    p = getVectorValueAddr_USMALLINT;
+  } else if (srcType == TSDB_DATA_TYPE_INT) {
+    p = getVectorValueAddr_INT;
+  } else if (srcType == TSDB_DATA_TYPE_UINT) {
+    p = getVectorValueAddr_UINT;
+  } else if (srcType == TSDB_DATA_TYPE_BIGINT) {
+    p = getVectorValueAddr_BIGINT;
+  } else if (srcType == TSDB_DATA_TYPE_UBIGINT) {
+    p = getVectorValueAddr_UBIGINT;
+  } else if (srcType == TSDB_DATA_TYPE_FLOAT) {
+    p = getVectorValueAddr_FLOAT;
+  } else if (srcType == TSDB_DATA_TYPE_DOUBLE) {
+    p = getVectorValueAddr_DOUBLE;
+  } else {
+    assert(0);
+  }
+  return p;
}
-void vectorRemainder(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight, int32_t rightType,
- void *output, int32_t order) {
- switch(leftType) {
- case TSDB_DATA_TYPE_TINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint8_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint16_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint32_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, int64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, uint64_t, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, float, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- switch (rightType) {
- case TSDB_DATA_TYPE_TINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UTINYINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint8_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_SMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_USMALLINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint16_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_INT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint32_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_BIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, int64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_UBIGINT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, uint64_t, output, order);
- break;
- }
- case TSDB_DATA_TYPE_FLOAT: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, float, output, order);
- break;
- }
- case TSDB_DATA_TYPE_DOUBLE: {
- DO_VECTOR_REMAINDER(left, numLeft, leftType, double, right, numRight, rightType, double, output, order);
- break;
- }
- default:
- assert(0);
- }
- break;
- }
- default:;
- }
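+/* All five operators share one skeleton: both operands are promoted to double
+ * and results are written to a double output column. Three cases cover
+ * broadcasting — equal lengths, a scalar left operand (len1 == 1), and a
+ * scalar right operand (len2 == 1). `i` and `step` walk the rows forward for
+ * TSDB_ORDER_ASC and backward otherwise, while `output` always advances
+ * forward. */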
+void vectorAdd(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+  int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+  int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+  double *output = (double *)out;
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type);
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type);
+
+  if (len1 == len2) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) + getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len1 == 1) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, 0), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, 0) + getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len2 == 1) {
+    for (; i >= 0 && i < len1; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, 0), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) + getVectorDoubleValueFnRight(right, 0));
+    }
+  }
+}
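+/* vectorSub and vectorMultiply below reuse the same broadcasting skeleton as
+ * vectorAdd; only the arithmetic operator differs. */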
+void vectorSub(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+  int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+  int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+  double *output = (double *)out;
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type);
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type);
+
+  if (len1 == len2) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) - getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len1 == 1) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, 0), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, 0) - getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len2 == 1) {
+    for (; i >= 0 && i < len1; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, 0), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) - getVectorDoubleValueFnRight(right, 0));
+    }
+  }
+}
+void vectorMultiply(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+  int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+  int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+  double *output = (double *)out;
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type);
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type);
+
+  if (len1 == len2) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) * getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len1 == 1) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, 0), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, 0) * getVectorDoubleValueFnRight(right, i));
+    }
+  } else if (len2 == 1) {
+    for (; i >= 0 && i < len1; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, 0), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+      SET_DOUBLE_VAL(output, getVectorDoubleValueFnLeft(left, i) * getVectorDoubleValueFnRight(right, 0));
+    }
+  }
+}
+void vectorDivide(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+  int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+  int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+  double *output = (double *)out;
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type);
+  _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type);
+  _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type);
+
+  if (len1 == len2) {
+    for (; i >= 0 && i < len2; i += step, output += 1) {
+      if (isNull(getVectorValueAddrFnLeft(left, i), _left_type) || isNull(getVectorValueAddrFnRight(right, i), _right_type)) {
+        SET_DOUBLE_NULL(output);
+        continue;
+      }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+      SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i));
+ }
+ } else if ((len1) == 1) {
+ for (; i >= 0 && i < (len2); i += step, output += 1) {
+ if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+      SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i));
+ }
+ } else if ((len2) == 1) {
+ for (; i >= 0 && i < (len1); i += step, output += 1) {
+ if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+      SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0));
+ }
+ }
+}
+void vectorRemainder(void *left, int32_t len1, int32_t _left_type, void *right, int32_t len2, int32_t _right_type, void *out, int32_t _ord) {
+ int32_t i = (_ord == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1;
+ int32_t step = (_ord == TSDB_ORDER_ASC) ? 1 : -1;
+ double *output=(double*)out;
+ _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnLeft = getVectorValueAddrFn(_left_type);
+ _arithmetic_getVectorValueAddr_fn_t getVectorValueAddrFnRight = getVectorValueAddrFn(_right_type);
+ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnLeft = getVectorDoubleValueFn(_left_type);
+ _arithmetic_getVectorDoubleValue_fn_t getVectorDoubleValueFnRight = getVectorDoubleValueFn(_right_type);
+
+ if (len1 == (len2)) {
+ for (; i >= 0 && i < (len2); i += step, output += 1) {
+ if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i));
+ }
+ } else if (len1 == 1) {
+ for (; i >= 0 && i < (len2); i += step, output += 1) {
+ if (isNull(getVectorValueAddrFnLeft(left,0), _left_type) || isNull(getVectorValueAddrFnRight(right,i), _right_type)) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,i));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,0) - ((int64_t)(getVectorDoubleValueFnLeft(left,0) / getVectorDoubleValueFnRight(right,i))) * getVectorDoubleValueFnRight(right,i));
+ }
+ } else if ((len2) == 1) {
+ for (; i >= 0 && i < len1; i += step, output += 1) {
+ if (isNull(getVectorValueAddrFnLeft(left,i), _left_type) || isNull(getVectorValueAddrFnRight(right,0), _right_type)) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ double v, u = 0.0;
+ GET_TYPED_DATA(v, double, _right_type, getVectorValueAddrFnRight(right,0));
+ if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &u) == 0) {
+ SET_DOUBLE_NULL(output);
+ continue;
+ }
+ SET_DOUBLE_VAL(output,getVectorDoubleValueFnLeft(left,i) - ((int64_t)(getVectorDoubleValueFnLeft(left,i) / getVectorDoubleValueFnRight(right,0))) * getVectorDoubleValueFnRight(right,0));
+ }
+ }
}
_arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr) {
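The four operators above (subtract, multiply, divide, remainder) share one broadcasting rule: equal-length inputs pair up element-wise, and a length-1 input is reused against every element of the other side; divide and remainder additionally emit NULL when the right operand compares equal to zero. The remainder is computed as a - trunc(a/b) * b, truncating toward zero via the (int64_t) cast. A minimal, hedged sketch of that arithmetic (illustrative names only, not part of the patch):

// Illustrative only: reproduces the broadcast + truncated-quotient remainder
// semantics of vectorRemainder above; the real code also NULL-checks inputs.
public final class VectorRemainderSketch {
    // a - trunc(a / b) * b, matching the (int64_t) cast in the C code
    static double remainder(double a, double b) {
        if (b == 0.0) return Double.NaN; // the patch writes a NULL output instead
        return a - ((long) (a / b)) * b;
    }

    public static void main(String[] args) {
        double[] left = {7.5, -7.5, 9.0};
        double right = 2.0; // len2 == 1: the scalar is broadcast across left
        for (double l : left) {
            System.out.printf("rem(%.1f, %.1f) = %.1f%n", l, right, remainder(l, right));
        }
        // prints 1.5, -1.5, 1.0 — the sign follows the dividend, as with fmod
    }
}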
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index a3a6c0fed40e053b33dd8c77a031ea9eabf00664..61378c79c4b5c44ffa11ae9132aa6f8b89ab5f71 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
- if (pBuilder->columns == NULL) return -1;
+ STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
+ if (columns == NULL) return -1;
+ pBuilder->columns = columns;
}
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
@@ -447,6 +448,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
int dcol = 0;
while (dcol < pCols->numOfCols) {
+ bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
@@ -457,13 +459,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
+ if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
- if(forceSetNull) {
+ if(forceSetNull || setCol) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
dcol++;
@@ -481,6 +484,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
+ bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
@@ -492,13 +496,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
+ if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
- if (forceSetNull) {
+ if(forceSetNull || setCol) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
}
++dcol;
@@ -532,7 +537,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
- if (source->cols[j].len > 0) {
+ if (source->cols[j].len > 0 || target->cols[j].len > 0) {
dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
target->maxPoints);
}
@@ -576,7 +581,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
if (key1 < key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
- if (src1->cols[i].len > 0) {
+ if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
}
@@ -594,6 +599,8 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
} else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
+ } else if(target->cols[i].len > 0) {
+ dataColSetNullAt(&target->cols[i], target->numOfRows);
}
}
target->numOfRows++;
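The tdataformat.c hunks above tighten partial-update merging: a NULL placeholder is now appended when forceSetNull is requested or when the row has already written a real value into an earlier column (setCol), so every per-column array stays aligned with the shared row counter. A toy model of why the placeholder matters (an assumed simplification, nothing like the engine's real SDataCols):

// Toy model only: columnar storage where each column is an append-only list.
// Skipping an absent value in one column would shift every later row, so the
// merge appends an explicit null placeholder instead — the intent of setCol.
import java.util.ArrayList;
import java.util.List;

public final class ColumnAlignmentToy {
    public static void main(String[] args) {
        List<Integer> ts = new ArrayList<>();
        List<Integer> value = new ArrayList<>();
        // row 2 carries no value; appending null keeps list index i == row i
        int[][] rows = {{1, 10}, {2, Integer.MIN_VALUE}, {3, 30}};
        for (int[] row : rows) {
            ts.add(row[0]);
            value.add(row[1] == Integer.MIN_VALUE ? null : row[1]);
        }
        System.out.println(ts + " / " + value); // [1, 2, 3] / [10, null, 30]
    }
}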
diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c
index ebdb33fd5b804e169a5e8ffc0b9a59e8dc0a331e..cc2bb8803badc2aae2e80200691be0439bac3afe 100644
--- a/src/common/src/texpr.c
+++ b/src/common/src/texpr.c
@@ -325,14 +325,6 @@ static void* exception_calloc(size_t nmemb, size_t size) {
return p;
}
-static void* exception_malloc(size_t size) {
- void* p = malloc(size);
- if (p == NULL) {
- THROW(TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
- return p;
-}
-
static UNUSED_FUNC char* exception_strdup(const char* str) {
char* p = strdup(str);
if (p == NULL) {
@@ -395,77 +387,6 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size) {
return exprTreeFromBinaryImpl(&br);
}
-tExprNode* exprTreeFromTableName(const char* tbnameCond) {
- if (!tbnameCond) {
- return NULL;
- }
-
- int32_t anchor = CLEANUP_GET_ANCHOR();
-
- tExprNode* expr = exception_calloc(1, sizeof(tExprNode));
- CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
-
- expr->nodeType = TSQL_NODE_EXPR;
-
- tExprNode* left = exception_calloc(1, sizeof(tExprNode));
- expr->_node.pLeft = left;
-
- left->nodeType = TSQL_NODE_COL;
- SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
- left->pSchema = pSchema;
-
- *pSchema = *tGetTbnameColumnSchema();
-
- tExprNode* right = exception_calloc(1, sizeof(tExprNode));
- expr->_node.pRight = right;
-
- if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN) == 0) {
- right->nodeType = TSQL_NODE_VALUE;
- expr->_node.optr = TSDB_RELATION_LIKE;
- tVariant* pVal = exception_calloc(1, sizeof(tVariant));
- right->pVal = pVal;
- size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN) + 1;
- pVal->pz = exception_malloc(len);
- memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_LIKE_LEN, len);
- pVal->nType = TSDB_DATA_TYPE_BINARY;
- pVal->nLen = (int32_t)len;
-
- } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN) == 0) {
- right->nodeType = TSQL_NODE_VALUE;
- expr->_node.optr = TSDB_RELATION_IN;
- tVariant* pVal = exception_calloc(1, sizeof(tVariant));
- right->pVal = pVal;
- pVal->nType = TSDB_DATA_TYPE_POINTER_ARRAY;
- pVal->arr = taosArrayInit(2, POINTER_BYTES);
-
- const char* cond = tbnameCond + QUERY_COND_REL_PREFIX_IN_LEN;
- for (const char *e = cond; *e != 0; e++) {
- if (*e == TS_PATH_DELIMITER[0]) {
- cond = e + 1;
- } else if (*e == ',') {
- size_t len = e - cond;
- char* p = exception_malloc(len + VARSTR_HEADER_SIZE);
- STR_WITH_SIZE_TO_VARSTR(p, cond, (VarDataLenT)len);
- cond += len;
- taosArrayPush(pVal->arr, &p);
- }
- }
-
- if (*cond != 0) {
- size_t len = strlen(cond) + VARSTR_HEADER_SIZE;
-
- char* p = exception_malloc(len);
- STR_WITH_SIZE_TO_VARSTR(p, cond, (VarDataLenT)(len - VARSTR_HEADER_SIZE));
- taosArrayPush(pVal->arr, &p);
- }
-
- taosArraySortString(pVal->arr, taosArrayCompareString);
- }
-
- CLEANUP_EXECUTE_TO(anchor, false);
- return expr;
-}
-
void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t type = tbufReadUint32(&br);
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index d1b816f122c40fb1c5dea9ec8fa9f0406142d5de..339fa35bb3009db96c9c6e0cabea6b60881f05c5 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -75,9 +75,18 @@ int32_t tsMaxBinaryDisplayWidth = 30;
*/
int32_t tsCompressMsgSize = -1;
+/* Denotes whether the server compresses retrieved column data before adding it to the rpc response message body.
+ * 0: all data is compressed
+ * -1: no data is compressed
+ * other values: all data is compressed if any retrieved column is larger than tsCompressColData bytes.
+ */
+int32_t tsCompressColData = -1;
+
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
-int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
+int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
+int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN;
+
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
@@ -87,6 +96,9 @@ int32_t tsMaxNumOfOrderedResults = 100000;
// 10 ms for sliding time, the value will changed in case of time precision changed
int32_t tsMinSlidingTime = 10;
+// the maximum number of distinct query results
+int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
+
// 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
@@ -139,6 +151,11 @@ int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
+int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
+
+// tsdb config
+// For backward compatibility
+bool tsdbForceKeepFile = false;
// balance
int8_t tsEnableBalance = 1;
@@ -160,6 +177,7 @@ int32_t tsHttpMaxThreads = 2;
int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0;
+int8_t tsHttpDbNameMandatory = 0;
// mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
@@ -194,6 +212,7 @@ char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0;
+int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
SDiskCfg tsDiskCfg[1];
@@ -260,6 +279,9 @@ uint32_t curRange = 100; // range
char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif
+// whether to kill long-running queries that cause dead locks
+int8_t tsDeadLockKillQuery = 0;
+
int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL;
void (*monExecuteSQLFp)(char *sql) = NULL;
@@ -544,6 +566,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "maxNumOfDistinctRes";
+ cfg.ptr = &tsMaxNumOfDistinctResults;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 10*10000;
+ cfg.maxValue = 10000*10000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -977,6 +1009,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "compressColData";
+ cfg.ptr = &tsCompressColData;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = -1;
+ cfg.maxValue = 100000000.0f;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "maxSQLLength";
cfg.ptr = &tsMaxSQLStringLen;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -997,6 +1039,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);
+ cfg.option = "maxRegexStringLen";
+ cfg.ptr = &tsMaxRegexStringLen;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = TSDB_MAX_FIELD_LEN;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_BYTE;
+ taosInitConfigOption(cfg);
+
cfg.option = "maxNumOfOrderedRes";
cfg.ptr = &tsMaxNumOfOrderedResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1201,6 +1253,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "topicBianryLen";
+ cfg.ptr = &tsTopicBianryLen;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 16;
+ cfg.maxValue = 16000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1241,6 +1303,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "httpDbNameMandatory";
+ cfg.ptr = &tsHttpDbNameMandatory;
+ cfg.valType = TAOS_CFG_VTYPE_INT8;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// debug flag
cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines;
@@ -1544,7 +1616,27 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM);
+ cfg.option = "tsdbMetaCompactRatio";
+ cfg.ptr = &tsTsdbMetaCompactRatio;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 0;
+ cfg.maxValue = 100;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
+ // enable kill long query
+ cfg.option = "deadLockKillQuery";
+ cfg.ptr = &tsDeadLockKillQuery;
+ cfg.valType = TAOS_CFG_VTYPE_INT8;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 1;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
#ifdef TD_TSZ
// lossy compress
cfg.option = "lossyColumns";
@@ -1598,6 +1690,9 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM);
+#else
+ assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5);
#endif
}
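For reference, the tsCompressColData policy documented in the tglobal.c comment above reduces to a three-way check. A hedged sketch of that decision (the real check lives in the server's C retrieval path, not in a class like this):

// Sketch of the tsCompressColData policy: 0 = always compress, -1 = never,
// any other value = compress when some retrieved column exceeds the threshold.
public final class CompressColDataPolicy {
    static boolean shouldCompress(int tsCompressColData, int[] retrievedColumnSizes) {
        if (tsCompressColData == 0) return true;
        if (tsCompressColData == -1) return false;
        for (int size : retrievedColumnSizes) {
            if (size > tsCompressColData) return true; // one oversized column is enough
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(shouldCompress(1024, new int[]{100, 4096})); // true
        System.out.println(shouldCompress(-1, new int[]{100, 4096}));   // false
    }
}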
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index e92169f097ac07fb97064beed9cd9b9741513e57..532333651df89ab16ce092e1b3d7c92806b8c883 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -70,12 +70,11 @@ SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFil
memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
for (int32_t j = 0; j < numOfFilters; ++j) {
-
if (pFilter[j].filterstr) {
size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
pFilter[j].pz = (int64_t) calloc(1, len);
- memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
+ memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
}
}
diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c
index 13c4160e7fc9e67528c61f2e085888eb489a8f89..08bfc2e9aa6f0b9337d484c725f2737cbbacaac0 100644
--- a/src/common/src/ttypes.c
+++ b/src/common/src/ttypes.c
@@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = {
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
do { \
- if (_list[(_index)] >= (INT64_MAX - (__sum))) { \
- __sum = INT64_MAX; \
- } else { \
- (__sum) += (_list)[(_index)]; \
- } \
+ (__sum) += (_list)[(_index)]; \
if ((__min) > (_list)[(_index)]) { \
(__min) = (_list)[(_index)]; \
(__minIndex) = (_index); \
diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c
index a491df6f988ef4b2abac44b12066df236e6ba90d..ca3bb956a2fef4fa98450181b4378025013bb735 100644
--- a/src/common/src/tvariant.c
+++ b/src/common/src/tvariant.c
@@ -38,12 +38,12 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
switch (token->type) {
case TSDB_DATA_TYPE_BOOL: {
- int32_t k = strncasecmp(token->z, "true", 4);
- if (k == 0) {
+ if (strncasecmp(token->z, "true", 4) == 0) {
pVar->i64 = TSDB_TRUE;
- } else {
- assert(strncasecmp(token->z, "false", 5) == 0);
+ } else if (strncasecmp(token->z, "false", 5) == 0) {
pVar->i64 = TSDB_FALSE;
+ } else {
+ return;
}
break;
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin
index a44ec1ca493ad01b2bf825b6418f69e11f548206..4a4d79099b076b8ff12d5b4fdbcba54049a6866d 160000
--- a/src/connector/grafanaplugin
+++ b/src/connector/grafanaplugin
@@ -1 +1 @@
-Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206
+Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index e432dac1cea593b371a173f334e5313236091ab3..1e5cede714820f29defe3c6b458b2daf467bc4d2 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index ef57198e78d2268faba526d5506b0dc384f5766f..7caf46848d18c4491cdea1ab50df31d8d2d26daf 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.34
+ 2.0.35
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index fbeeeb56d30357294663a5ebf64608c57e066a7d..810a85f8a33b3f244dab81e349b9df786ec50c21 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.34
+ 2.0.35
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
@@ -113,11 +113,11 @@
**/AppMemoryLeakTest.java
+ **/JDBCTypeAndTypeCompareTest.java
**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
**/DatetimeBefore1970Test.java
**/FailOverTest.java
**/InvalidResultSetPointerTest.java
- **/RestfulConnectionTest.java
**/TSDBJNIConnectorTest.java
**/TaosInfoMonitorTest.java
**/UnsignedNumberJniTest.java
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index 521a88b128ff930510bf00cdcb6a12cbc3211742..307451e014c59c1c3419f1a9daff4f89e8b90d46 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -118,9 +118,6 @@ public class TSDBDriver extends AbstractDriver {
}
public Connection connect(String url, Properties info) throws SQLException {
- if (url == null)
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_URL_NOT_SET);
-
if (!acceptsURL(url))
return null;
@@ -135,8 +132,7 @@ public class TSDBDriver extends AbstractDriver {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED);
try {
- TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
- (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
+ TSDBJNIConnector.init(props);
return new TSDBConnection(props, this.dbMetaData);
} catch (SQLWarning sqlWarning) {
sqlWarning.printStackTrace();
@@ -205,6 +201,7 @@ public class TSDBDriver extends AbstractDriver {
String dbProductName = url.substring(0, beginningOfSlashes);
dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
+ urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PRODUCT_NAME, dbProductName);
// parse database name
url = url.substring(beginningOfSlashes + 2);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
index 977ae665152fd1627cf25005061a4ca2aaa4e488..0970148b1dfb6c6c1fb85330e312bf2c8168b3c7 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java
@@ -35,18 +35,19 @@ public class TSDBError {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_USER_IS_REQUIRED, "user is required");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_PASSWORD_IS_REQUIRED, "password is required");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_JSON_FORMAT, "invalid json format");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database, please see taoslog for more details");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection is NULL");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "JNI result set is NULL");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultSet");
- TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed");
+ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed, please see taoslog for more details");
}
public static SQLException createSQLException(int errorCode) {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
index 2207db6f9379595e68b8ed00ea8f7298ca3b45ad..0f4427fa20e272917df0327552efd1a80cd56b4d 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -31,6 +31,7 @@ public class TSDBErrorNumbers {
public static final int ERROR_RESTFul_Client_IOException = 0x2318;
public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required
public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required
+ public static final int ERROR_INVALID_JSON_FORMAT = 0x231b;
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@@ -72,6 +73,7 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_RESTFul_Client_IOException);
errorNumbers.add(ERROR_USER_IS_REQUIRED);
errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);
+ errorNumbers.add(ERROR_INVALID_JSON_FORMAT);
errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java
new file mode 100644
index 0000000000000000000000000000000000000000..31299a1c6f37a8b75521a65e7de09f5162558dd6
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBException.java
@@ -0,0 +1,22 @@
+package com.taosdata.jdbc;
+
+public class TSDBException {
+ private int code;
+ private String message;
+
+ public int getCode() {
+ return code;
+ }
+
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public void setMessage(String message) {
+ this.message = message;
+ }
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index c634fe2e9503ff19afae85f285d921f330562612..aaada2e78ec284f4019b29465a38db109cf9d80a 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -16,18 +16,21 @@
*/
package com.taosdata.jdbc;
+import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;
+import java.util.Properties;
/**
* JNI connector
*/
public class TSDBJNIConnector {
- private static volatile Boolean isInitialized = false;
+ private static final Object LOCK = new Object();
+ private static volatile boolean isInitialized;
private final TaosInfo taosInfo = TaosInfo.getInstance();
private long taos = TSDBConstants.JNI_NULL_POINTER; // Connection pointer used in C
@@ -38,24 +41,27 @@ public class TSDBJNIConnector {
System.loadLibrary("taos");
}
- public boolean isClosed() {
- return this.taos == TSDBConstants.JNI_NULL_POINTER;
- }
+ public static void init(Properties props) throws SQLWarning {
+ synchronized (LOCK) {
+ if (!isInitialized) {
- public boolean isResultsetClosed() {
- return this.isResultsetClosed;
- }
+ JSONObject configJSON = new JSONObject();
+ for (String key : props.stringPropertyNames()) {
+ configJSON.put(key, props.getProperty(key));
+ }
+ setConfigImp(configJSON.toJSONString());
- public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning {
- synchronized (isInitialized) {
- if (!isInitialized) {
- initImp(configDir);
+ initImp(props.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, null));
+
+ String locale = props.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE);
if (setOptions(0, locale) < 0) {
throw TSDBError.createSQLWarning("Failed to set locale: " + locale + ". System default will be used.");
}
+ String charset = props.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET);
if (setOptions(1, charset) < 0) {
throw TSDBError.createSQLWarning("Failed to set charset: " + charset + ". System default will be used.");
}
+ String timezone = props.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
if (setOptions(2, timezone) < 0) {
throw TSDBError.createSQLWarning("Failed to set timezone: " + timezone + ". System default will be used.");
}
@@ -65,11 +71,13 @@ public class TSDBJNIConnector {
}
}
- public static native void initImp(String configDir);
+ private static native void initImp(String configDir);
- public static native int setOptions(int optionIndex, String optionValue);
+ private static native int setOptions(int optionIndex, String optionValue);
- public static native String getTsCharset();
+ private static native String getTsCharset();
+
+ private static native TSDBException setConfigImp(String config);
public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException {
if (this.taos != TSDBConstants.JNI_NULL_POINTER) {
@@ -159,6 +167,14 @@ public class TSDBJNIConnector {
private native long isUpdateQueryImp(long connection, long pSql);
+ public boolean isClosed() {
+ return this.taos == TSDBConstants.JNI_NULL_POINTER;
+ }
+
+ public boolean isResultsetClosed() {
+ return this.isResultsetClosed;
+ }
+
/**
* Free result set operation from C to release result set pointer by JNI
*/
@@ -278,25 +294,20 @@ public class TSDBJNIConnector {
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
public long prepareStmt(String sql) throws SQLException {
- long stmt;
- try {
- stmt = prepareStmtImp(sql.getBytes(), this.taos);
- } catch (Exception e) {
- e.printStackTrace();
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
- }
+ long stmt = prepareStmtImp(sql.getBytes(), this.taos);
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
- throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "connection already closed");
}
-
if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
}
-
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}
+ if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR);
+ }
return stmt;
}
@@ -313,8 +324,7 @@ public class TSDBJNIConnector {
private native int setBindTableNameImp(long stmt, String name, long conn);
public void setBindTableNameAndTags(long stmt, String tableName, int numOfTags, ByteBuffer tags, ByteBuffer typeList, ByteBuffer lengthList, ByteBuffer nullList) throws SQLException {
- int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(),
- nullList.array(), this.taos);
+ int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(), nullList.array(), this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind table name and corresponding tags");
}
@@ -357,4 +367,6 @@ public class TSDBJNIConnector {
}
private native int insertLinesImp(String[] lines, long conn);
+
+
}
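With the refactor above, init(Properties) flattens every connection property into a single flat JSON object and hands it to the native setConfigImp before initImp runs. A dependency-free sketch of the payload shape (the driver itself uses fastjson's JSONObject; the keys here are examples):

// Sketch only: shows the flat JSON document setConfigImp receives after this
// patch. Plain string-building here; the connector uses com.alibaba.fastjson.
import java.util.Properties;
import java.util.stream.Collectors;

public final class ConfigJsonSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("debugFlag", "135");
        props.setProperty("rpcTimer", "500");
        String json = props.stringPropertyNames().stream()
                .map(key -> "\"" + key + "\":\"" + props.getProperty(key) + "\"")
                .collect(Collectors.joining(",", "{", "}"));
        System.out.println(json); // e.g. {"rpcTimer":"500","debugFlag":"135"}
    }
}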
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
index 6211f61dc505d2ccba5f11f3aacc980771b1a110..ff49677b01fa1c3a4d482cebd51269d5f1589e43 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java
@@ -32,6 +32,7 @@ import java.util.List;
import com.taosdata.jdbc.utils.NullType;
public class TSDBResultSetBlockData {
+ private static final int BINARY_LENGTH_OFFSET = 2;
private int numOfRows = 0;
private int rowIndex = 0;
@@ -404,10 +405,8 @@ public class TSDBResultSetBlockData {
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
- bb.position(fieldSize * this.rowIndex);
-
+ bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
int length = bb.getShort();
-
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isBinaryNull(dest, length)) {
@@ -419,16 +418,13 @@ public class TSDBResultSetBlockData {
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
- bb.position(fieldSize * this.rowIndex);
-
+ bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex);
int length = bb.getShort();
-
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isNcharNull(dest, length)) {
return null;
}
-
try {
String charset = TaosGlobalConfig.getCharset();
return new String(dest, charset);
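The BINARY_LENGTH_OFFSET fix above accounts for the 2-byte length word stored in front of every BINARY/NCHAR cell, so the per-row stride is fieldSize + 2 rather than fieldSize. A minimal standalone sketch of reading such a length-prefixed cell (not the driver's code):

// Minimal sketch: row i of a length-prefixed column starts at
// (fieldSize + 2) * i — 2 bytes of length, then up to fieldSize payload bytes.
import java.nio.ByteBuffer;

public final class LengthPrefixedCell {
    static byte[] readCell(ByteBuffer bb, int fieldSize, int rowIndex) {
        bb.position((fieldSize + 2) * rowIndex);
        int length = bb.getShort(); // actual payload length of this row's value
        byte[] dest = new byte[length];
        bb.get(dest, 0, length);
        return dest;
    }

    public static void main(String[] args) {
        int fieldSize = 4;
        ByteBuffer bb = ByteBuffer.allocate(2 * (fieldSize + 2));
        bb.putShort((short) 2).put(new byte[]{'h', 'i', 0, 0});     // row 0: "hi"
        bb.putShort((short) 4).put(new byte[]{'t', 'e', 's', 't'}); // row 1: "test"
        System.out.println(new String(readCell(bb, fieldSize, 1))); // test
    }
}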
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
index 12a0ab57e2c35c7f1f550dd213db19a0effd4ebc..e818736096355c4937e5af0470b77c95486c86db 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java
@@ -18,7 +18,7 @@ public class RestfulConnection extends AbstractConnection {
private final String url;
private final String database;
private final String token;
- /******************************************************/
+
private boolean isClosed;
private final DatabaseMetaData metadata;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index a88dc411f333fea82fa5310ee87f263b117f7e70..21c76f73b287e55ef14f5d70cf6a911a9cb543db 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -88,17 +88,24 @@ public class RestfulStatement extends AbstractStatement {
}
private String getUrl() throws SQLException {
+ String dbname = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_DBNAME);
+ if (dbname == null || dbname.trim().isEmpty()) {
+ dbname = "";
+ } else {
+ dbname = "/" + dbname.toLowerCase();
+ }
TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase());
String url;
+
switch (timestampFormat) {
case TIMESTAMP:
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt" + dbname;
break;
case UTC:
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc" + dbname;
break;
default:
- url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
+ url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql" + dbname;
}
return url;
}
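After this change, the database name from the connection properties is appended to the REST endpoint path, so statements run against that database by default. An illustrative rendering of the resulting URLs (host, port, and database are example values):

// Example only: the endpoint shapes produced by getUrl() after this patch.
public final class RestUrlExample {
    public static void main(String[] args) {
        String host = "127.0.0.1";
        int port = 6041;
        String dbname = "/power"; // from PROPERTY_KEY_DBNAME, lower-cased; "" if unset
        System.out.println("http://" + host + ":" + port + "/rest/sql" + dbname);
        // -> http://127.0.0.1:6041/rest/sql/power ; /rest/sqlt and /rest/sqlutc
        //    gain the same suffix for the other timestamp formats
    }
}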
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java
new file mode 100644
index 0000000000000000000000000000000000000000..6a983cd5bdd6d886dcac01f6085c70eade4f7cf5
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SetConfigurationInJNITest.java
@@ -0,0 +1,249 @@
+package com.taosdata.jdbc;
+
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+
+
+public class SetConfigurationInJNITest {
+
+ private String host = "127.0.0.1";
+ private String dbname = "test_set_config";
+
+ @Test
+ public void setConfigInUrl() {
+ try {
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata&debugFlag=143&rpcTimer=500");
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void setConfigInProperties() {
+ try {
+ Properties props = new Properties();
+ props.setProperty("debugFlag", "143");
+ props.setProperty("r pcTimer", "500");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+    //test case 1: set debugFlag=135
+    //expect: debugFlag=135
+    //result: pass
+ public void setConfigfordebugFlag() {
+ try {
+ Properties props = new Properties();
+ //set debugFlag=135
+ props.setProperty("debugFlag", "135");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 2: set debugFlag=abc (wrong type)
+    //expect: debugFlag=135
+    //result: pass
+ public void setConfigforwrongtype() {
+ try {
+ Properties props = new Properties();
+ //set debugFlag=135
+ props.setProperty("debugFlag", "abc");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 3: set rpcTimer=0 (below the lower bound)
+    //expect: rpcTimer=300
+    //result: pass
+ public void setConfigrpcTimer() {
+ try {
+ Properties props = new Properties();
+ //set rpcTimer=0
+ props.setProperty("rpcTimer", "0");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 4: set rpcMaxTime=10000 (above the upper bound)
+    //expect: rpcMaxTime=600
+    //result: pass
+ public void setConfigforrpcMaxTime() {
+ try {
+ Properties props = new Properties();
+ //set rpcMaxTime=10000
+ props.setProperty("rpcMaxTime", "10000");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 5: set numOfThreadsPerCore=aaa (wrong type)
+    //expect: numOfThreadsPerCore=1.0
+    //result: pass
+ public void setConfigfornumOfThreadsPerCore() {
+ try {
+ Properties props = new Properties();
+ //set numOfThreadsPerCore=aaa
+ props.setProperty("numOfThreadsPerCore", "aaa");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 6: set numOfThreadsPerCore=100000 (above the upper bound)
+    //expect: numOfThreadsPerCore=1.0
+    //result: pass
+ public void setConfignumOfThreadsPerCore() {
+ try {
+ Properties props = new Properties();
+ //set numOfThreadsPerCore=100000
+ props.setProperty("numOfThreadsPerCore", "100000");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    // test case 7: set a mix of valid and invalid options (debugFlag=0, rpcDebugFlag=143, cDebugFlag=143, rpcTimer=100000)
+    // expect: rpcDebugFlag=143, cDebugFlag=143, rpcTimer=300
+    // result: pass
+ public void setConfigformaxTmrCtrl() {
+ try {
+ Properties props = new Properties();
+ props.setProperty("debugFlag", "0");
+ props.setProperty("rpcDebugFlag", "143");
+ props.setProperty("cDebugFlag", "143");
+ props.setProperty("rpcTimer", "100000");
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", props);
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ @Test
+    //test case 8: set wrongly-typed values via the URL (debugFlag=abc, rpcTimer=abc)
+    //expect: default values retained
+    //result: pass
+ public void setConfigInUrlwithwrongtype() {
+ try {
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata&debugFlag=abc&rpcTimer=abc");
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("drop database if exists " + dbname);
+
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
index 88ff5d3a811e17aaabbeb0a451fbff010307ab6d..8be6ae6b1c566abcd7ec398e7df3f5308e29e1b1 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
@@ -5,9 +5,9 @@ import org.junit.Test;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.sql.SQLException;
-import java.sql.SQLWarning;
import java.util.ArrayList;
import java.util.List;
+import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -19,25 +19,25 @@ public class TSDBJNIConnectorTest {
@Test
public void test() {
try {
-
try {
//change sleepSeconds when debugging with attach to process to find PID
int sleepSeconds = -1;
- if (sleepSeconds>0) {
+ if (sleepSeconds > 0) {
RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
String jvmName = runtimeBean.getName();
long pid = Long.valueOf(jvmName.split("@")[0]);
System.out.println("JVM PID = " + pid);
- Thread.sleep(sleepSeconds*1000);
+ Thread.sleep(sleepSeconds * 1000);
}
- }
- catch (Exception e) {
+ } catch (Exception e) {
e.printStackTrace();
}
// init
- TSDBJNIConnector.init("/etc/taos", null, null, null);
+ Properties properties = new Properties();
+ properties.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
+ TSDBJNIConnector.init(properties);
// connect
TSDBJNIConnector connector = new TSDBJNIConnector();
@@ -45,12 +45,12 @@ public class TSDBJNIConnectorTest {
// setup
String setupSqlStrs[] = {"create database if not exists d precision \"us\"",
- "create table if not exists d.t(ts timestamp, f int)",
- "create database if not exists d2",
- "create table if not exists d2.t2(ts timestamp, f int)",
- "insert into d.t values(now+100s, 100)",
- "insert into d2.t2 values(now+200s, 200)"
- };
+ "create table if not exists d.t(ts timestamp, f int)",
+ "create database if not exists d2",
+ "create table if not exists d2.t2(ts timestamp, f int)",
+ "insert into d.t values(now+100s, 100)",
+ "insert into d2.t2 values(now+200s, 200)"
+ };
for (String setupSqlStr : setupSqlStrs) {
long setupSql = connector.executeQuery(setupSqlStr);
@@ -115,15 +115,13 @@ public class TSDBJNIConnectorTest {
}
// close statement
connector.executeQuery("use d");
- String[] lines = new String[] {"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
- "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"};
+ String[] lines = new String[]{"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
+ "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"};
connector.insertLines(lines);
// close connection
connector.closeConnection();
- } catch (SQLWarning throwables) {
- throwables.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
@@ -140,11 +138,7 @@ public class TSDBJNIConnectorTest {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL);
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0);
- } else if (code == TSDBConstants.JNI_FETCH_END) {
- return false;
- } else {
- return true;
- }
+ } else return code != TSDBConstants.JNI_FETCH_END;
}
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
index 6bddd3f42835e6706ef922f2175d6e9a36dcf509..3d76e1f98d4f8aa1d0ba3d68395e4036c5b069e6 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java
@@ -586,6 +586,130 @@ public class TSDBPreparedStatementTest {
Assert.assertEquals(numOfRows, rows);
}
+ @Test
+ public void bindDataQueryTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))");
+
+ int numOfRows = 1;
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)");
+ s.setTableName("w2");
+ s.setTagInt(0, 1);
+ s.setTagString(1, "test");
+
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add("test" + i % 4);
+ }
+ s.setString(1, s2, 10);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+
+ String sql = "select * from weather_test where t1 >= ? and t1 <= ?";
+ TSDBPreparedStatement s1 = (TSDBPreparedStatement) conn.prepareStatement(sql);
+ s1.setInt(1, 0);
+ s1.setInt(2, 10);
+
+ ResultSet rs = s1.executeQuery();
+ int rows = 0;
+ while (rs.next()) {
+ rows++;
+ }
+ Assert.assertEquals(numOfRows, rows);
+ }
+
+ @Test
+    public void setTagNullTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(10), t9 nchar(10))");
+
+ int numOfRows = 1;
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?,?,?,?,?,?,?,?) values(?, ?)");
+ s.setTableName("w3");
+ s.setTagNull(0, TSDBConstants.TSDB_DATA_TYPE_TINYINT);
+ s.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
+ s.setTagNull(2, TSDBConstants.TSDB_DATA_TYPE_INT);
+ s.setTagNull(3, TSDBConstants.TSDB_DATA_TYPE_BIGINT);
+ s.setTagNull(4, TSDBConstants.TSDB_DATA_TYPE_FLOAT);
+ s.setTagNull(5, TSDBConstants.TSDB_DATA_TYPE_DOUBLE);
+ s.setTagNull(6, TSDBConstants.TSDB_DATA_TYPE_BOOL);
+ s.setTagNull(7, TSDBConstants.TSDB_DATA_TYPE_BINARY);
+ s.setTagNull(8, TSDBConstants.TSDB_DATA_TYPE_NCHAR);
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add(i);
+ }
+ s.setInt(1, s2);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+ }
+
+ private String stringGenerator(int length) {
+ String source = "abcdefghijklmnopqrstuvwxyz";
+ StringBuilder sb = new StringBuilder();
+ Random rand = new Random();
+        for (int i = 0; i < length; i++) {
+ sb.append(source.charAt(rand.nextInt(26)));
+ }
+ return sb.toString();
+ }
+
+ @Test(expected = SQLException.class)
+    public void setMaxTableNameTest() throws SQLException {
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop table if exists weather_test");
+ stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 int)");
+
+ TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?)");
+ String tbname = stringGenerator(193);
+ s.setTableName(tbname);
+ s.setTagInt(0, 1);
+
+ int numOfRows = 1;
+
+ ArrayList ts = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ ts.add(System.currentTimeMillis() + i);
+ }
+ s.setTimestamp(0, ts);
+
+ ArrayList s2 = new ArrayList<>();
+ for (int i = 0; i < numOfRows; i++) {
+ s2.add(i);
+ }
+ s.setInt(1, s2);
+
+ s.columnDataAddBatch();
+ s.columnDataExecuteBatch();
+ s.columnDataCloseBatch();
+ }
+
+
@Test(expected = SQLException.class)
public void createTwoSameDbTest() throws SQLException {
// when
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..eb3b2985dfaff1b956909a50ca23470279cb48ca
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/JDBCTypeAndTypeCompareTest.java
@@ -0,0 +1,34 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.Test;
+
+import java.sql.*;
+
+public class JDBCTypeAndTypeCompareTest {
+
+ @Test
+ public void test() throws SQLException {
+ Connection conn = DriverManager.getConnection("jdbc:TAOS://192.168.17.156:6030/", "root", "taosdata");
+ Statement stmt = conn.createStatement();
+
+ stmt.execute("drop database if exists test");
+ stmt.execute("create database if not exists test");
+ stmt.execute("use test");
+ stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10) )");
+ stmt.execute("insert into weather values(now, 1, 2, 3.0, 4.0, 5, 6, true, 'test','test')");
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ ResultSetMetaData meta = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= meta.getColumnCount(); i++) {
+ String columnName = meta.getColumnName(i);
+ String columnTypeName = meta.getColumnTypeName(i);
+ Object value = rs.getObject(i);
+ System.out.printf("columnName : %s, columnTypeName: %s, JDBCType: %s\n", columnName, columnTypeName, value.getClass().getName());
+ }
+ }
+
+ stmt.close();
+ conn.close();
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..18a2c32aca0535567dd42e886bc87ae618596a40
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiConnectionWithDifferentDbTest.java
@@ -0,0 +1,101 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class MultiConnectionWithDifferentDbTest {
+
+ private static String host = "127.0.0.1";
+ private static String db1 = "db1";
+ private static String db2 = "db2";
+
+ private long ts;
+
+ @Test
+ public void test() {
+ List threads = IntStream.range(1, 3).mapToObj(i -> new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < 10; j++) {
+ queryDb();
+ try {
+ TimeUnit.SECONDS.sleep(1);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private void queryDb() {
+ String url = "jdbc:TAOS-RS://" + host + ":6041/db" + i + "?user=root&password=taosdata";
+ try (Connection connection = DriverManager.getConnection(url)) {
+ Statement stmt = connection.createStatement();
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ assertNotNull(rs);
+ rs.next();
+ long actual = rs.getTimestamp("ts").getTime();
+ assertEquals(ts, actual);
+
+ int f1 = rs.getInt("f1");
+ assertEquals(i, f1);
+
+ String loc = i == 1 ? "beijing" : "shanghai";
+ String loc_actual = rs.getString("loc");
+ assertEquals(loc, loc_actual);
+
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ }, "thread-" + i)).collect(Collectors.toList());
+
+ threads.forEach(Thread::start);
+
+ for (Thread t : threads) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ }
+
+ @Before
+ public void before() {
+ ts = System.currentTimeMillis();
+
+ try {
+ Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + db1);
+ stmt.execute("create database if not exists " + db1);
+ stmt.execute("use " + db1);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('beijing') values(" + ts + ", 1)");
+
+ stmt.execute("drop database if exists " + db2);
+ stmt.execute("create database if not exists " + db2);
+ stmt.execute("use " + db2);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('shanghai') values(" + ts + ", 2)");
+
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UseNowInsertTimestampTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UseNowInsertTimestampTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..fbce021d1bff3655eedcf487dbcbf4747d5f9897
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UseNowInsertTimestampTest.java
@@ -0,0 +1,84 @@
+package com.taosdata.jdbc.cases;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class UseNowInsertTimestampTest {
+ String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
+
+ @Test
+ public void millisec() {
+ try (Connection conn = DriverManager.getConnection(url)) {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test");
+ stmt.execute("create database if not exists test precision 'ms'");
+ stmt.execute("use test");
+ stmt.execute("create table weather(ts timestamp, f1 int)");
+ stmt.execute("insert into weather values(now, 1)");
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ rs.next();
+ Timestamp ts = rs.getTimestamp("ts");
+ assertEquals(13, Long.toString(ts.getTime()).length());
+
+ int nanos = ts.getNanos();
+ assertEquals(0, nanos % 1000_000);
+
+ stmt.execute("drop database if exists test");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void microsec() {
+ try (Connection conn = DriverManager.getConnection(url)) {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test");
+ stmt.execute("create database if not exists test precision 'us'");
+ stmt.execute("use test");
+ stmt.execute("create table weather(ts timestamp, f1 int)");
+ stmt.execute("insert into weather values(now, 1)");
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ rs.next();
+ Timestamp ts = rs.getTimestamp("ts");
+ int nanos = ts.getNanos();
+
+ assertEquals(0, nanos % 1000);
+
+ stmt.execute("drop database if exists test");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void nanosec() {
+ try (Connection conn = DriverManager.getConnection(url)) {
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test");
+ stmt.execute("create database if not exists test precision 'ns'");
+ stmt.execute("use test");
+ stmt.execute("create table weather(ts timestamp, f1 int)");
+ stmt.execute("insert into weather values(now, 1)");
+
+ ResultSet rs = stmt.executeQuery("select * from weather");
+ rs.next();
+
+ Timestamp ts = rs.getTimestamp("ts");
+
+ int nanos = ts.getNanos();
+ assertTrue(nanos % 1000 != 0);
+
+ stmt.execute("drop database if exists test");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
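
The three tests above pin down how database precision surfaces through java.sql.Timestamp: with 'ms' precision getNanos() is a multiple of 1,000,000, with 'us' a multiple of 1,000, and with 'ns' the sub-microsecond digits survive. A minimal sketch of the same check through the Python connector touched later in this patch (local server assumed; database and table names are illustrative):

    import taos

    conn = taos.connect(host='127.0.0.1', user='root', password='taosdata')
    c = conn.cursor()
    c.execute("drop database if exists test_prec")
    c.execute("create database test_prec precision 'us'")  # also 'ms' or 'ns'
    c.execute("use test_prec")
    c.execute("create table weather(ts timestamp, f1 int)")
    c.execute("insert into weather values(now, 1)")
    c.execute("select * from weather")
    for ts, f1 in c.fetchall():
        # With 'us' precision the timestamp resolves to whole microseconds,
        # mirroring the nanos % 1000 == 0 assertion in the Java test.
        print(ts, f1)
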
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..9fe51e7203fac7133783e47fd5b0cc07f33b2494
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/DatabaseSpecifiedTest.java
@@ -0,0 +1,69 @@
+package com.taosdata.jdbc.rs;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+
+import static org.junit.Assert.*;
+
+public class DatabaseSpecifiedTest {
+
+ private static String host = "127.0.0.1";
+ private static String dbname = "test_db_spec";
+
+ private Connection connection;
+ private long ts;
+
+ @Test
+ public void test() throws SQLException {
+ // when
+ connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/" + dbname + "?user=root&password=taosdata");
+ try (Statement stmt = connection.createStatement();) {
+ ResultSet rs = stmt.executeQuery("select * from weather");
+
+ //then
+ assertNotNull(rs);
+ rs.next();
+ long now = rs.getTimestamp("ts").getTime();
+ assertEquals(ts, now);
+ int f1 = rs.getInt(2);
+ assertEquals(1, f1);
+ String loc = rs.getString("loc");
+ assertEquals("beijing", loc);
+ }
+ connection.close();
+ }
+
+ @Before
+ public void before() {
+ ts = System.currentTimeMillis();
+ try {
+ Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+ Statement stmt = connection.createStatement();
+
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
+ stmt.execute("insert into t1 using weather tags('beijing') values( " + ts + ", 1)");
+
+ stmt.close();
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try {
+ if (connection != null)
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
index abd60f5b63d46b406f19b6be9dcbbab6b786de12..1c5c03aacb5e7ed5683c75414975224a67d49e21 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java
@@ -9,6 +9,8 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
+import static org.junit.Assert.assertEquals;
+
public class RestfulConnectionTest {
private static final String host = "127.0.0.1";
@@ -26,7 +28,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
} catch (SQLException e) {
e.printStackTrace();
}
@@ -38,7 +40,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -49,7 +51,7 @@ public class RestfulConnectionTest {
@Test
public void nativeSQL() throws SQLException {
String nativeSQL = conn.nativeSQL("select * from log.log");
- Assert.assertEquals("select * from log.log", nativeSQL);
+ assertEquals("select * from log.log", nativeSQL);
}
@Test
@@ -87,7 +89,7 @@ public class RestfulConnectionTest {
public void getMetaData() throws SQLException {
DatabaseMetaData meta = conn.getMetaData();
Assert.assertNotNull(meta);
- Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
+ assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
}
@Test
@@ -103,25 +105,25 @@ public class RestfulConnectionTest {
@Test
public void setCatalog() throws SQLException {
conn.setCatalog("test");
- Assert.assertEquals("test", conn.getCatalog());
+ assertEquals("test", conn.getCatalog());
}
@Test
public void getCatalog() throws SQLException {
conn.setCatalog("log");
- Assert.assertEquals("log", conn.getCatalog());
+ assertEquals("log", conn.getCatalog());
}
@Test(expected = SQLFeatureNotSupportedException.class)
public void setTransactionIsolation() throws SQLException {
conn.setTransactionIsolation(Connection.TRANSACTION_NONE);
- Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+ assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
}
@Test
public void getTransactionIsolation() throws SQLException {
- Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
+ assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
}
@Test
@@ -140,7 +142,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
}
@@ -152,7 +154,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
}
@@ -175,13 +177,13 @@ public class RestfulConnectionTest {
@Test(expected = SQLFeatureNotSupportedException.class)
public void setHoldability() throws SQLException {
conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
- Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+ assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
}
@Test
public void getHoldability() throws SQLException {
- Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
+ assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -210,7 +212,7 @@ public class RestfulConnectionTest {
ResultSet rs = stmt.executeQuery("select server_status()");
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
@@ -222,7 +224,7 @@ public class RestfulConnectionTest {
ResultSet rs = pstmt.executeQuery();
rs.next();
int status = rs.getInt("server_status()");
- Assert.assertEquals(1, status);
+ assertEquals(1, status);
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
@@ -299,11 +301,11 @@ public class RestfulConnectionTest {
Properties info = conn.getClientInfo();
String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET);
- Assert.assertEquals("UTF-8", charset);
+ assertEquals("UTF-8", charset);
String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE);
- Assert.assertEquals("en_US.UTF-8", locale);
+ assertEquals("en_US.UTF-8", locale);
String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
- Assert.assertEquals("UTC-8", timezone);
+ assertEquals("UTC-8", timezone);
}
@Test
@@ -313,11 +315,11 @@ public class RestfulConnectionTest {
conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET);
- Assert.assertEquals("UTF-8", charset);
+ assertEquals("UTF-8", charset);
String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE);
- Assert.assertEquals("en_US.UTF-8", locale);
+ assertEquals("en_US.UTF-8", locale);
String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
- Assert.assertEquals("UTC-8", timezone);
+ assertEquals("UTC-8", timezone);
}
@Test(expected = SQLFeatureNotSupportedException.class)
@@ -345,14 +347,15 @@ public class RestfulConnectionTest {
conn.abort(null);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void setNetworkTimeout() throws SQLException {
conn.setNetworkTimeout(null, 1000);
}
- @Test(expected = SQLFeatureNotSupportedException.class)
+ @Test
public void getNetworkTimeout() throws SQLException {
- conn.getNetworkTimeout();
+ int timeout = conn.getNetworkTimeout();
+ assertEquals(0, timeout);
}
@Test
diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js
index 03d27e5593ccb15d8ff47cd3c3dedba765d14fc1..5ba2739c35b1f0aef61ba3e52ae5d2f3a901df77 100644
--- a/src/connector/nodejs/nodetaos/cinterface.js
+++ b/src/connector/nodejs/nodetaos/cinterface.js
@@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
return res;
}
+function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let len = data.readIntLE(currOffset, 2);
+ let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
+ if (dataEntry[0] == 255) {
+ res.push(null)
+ } else {
+ res.push(dataEntry.toString("utf-8"));
+ }
+ currOffset += nbytes;
+ }
+ return res;
+}
+
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
while (currOffset < data.length) {
let len = data.readIntLE(currOffset, 2);
let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
- res.push(dataEntry.toString("utf-8"));
+ if (dataEntry[0] == 255 && dataEntry[1] == 255) {
+ res.push(null)
+ } else {
+ res.push(dataEntry.toString("utf-8"));
+ }
currOffset += nbytes;
}
return res;
@@ -132,7 +154,7 @@ let convertFunctions = {
[FieldTypes.C_BIGINT]: convertBigint,
[FieldTypes.C_FLOAT]: convertFloat,
[FieldTypes.C_DOUBLE]: convertDouble,
- [FieldTypes.C_BINARY]: convertNchar,
+ [FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
[FieldTypes.C_NCHAR]: convertNchar
}
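
convertBinary and convertNchar walk the same layout: each row occupies a fixed nbytes-wide slot holding a 2-byte little-endian length followed by the payload, and a payload starting with 0xFF (0xFF 0xFF for NCHAR) marks NULL. A standalone sketch of that walk in Python, against a hand-built buffer, for illustration only:

    import struct

    def parse_binary_column(data, nbytes):
        # One fixed-width slot per row: 2-byte LE length, then the payload.
        res = []
        for off in range(0, len(data), nbytes):
            (length,) = struct.unpack_from('<h', data, off)
            payload = data[off + 2:off + 2 + length]
            # A leading 0xFF byte marks a NULL BINARY cell.
            res.append(None if payload[:1] == b'\xff' else payload.decode('utf-8'))
        return res

    # Two 8-byte slots: 'ab' followed by a NULL cell.
    buf = b'\x02\x00ab\x00\x00\x00\x00' + b'\x02\x00\xff\xff\x00\x00\x00\x00'
    print(parse_binary_column(buf, 8))  # ['ab', None]
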
diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js
new file mode 100644
index 0000000000000000000000000000000000000000..68fad89c22894ec358d55e9c03746fbd86ce0c99
--- /dev/null
+++ b/src/connector/nodejs/test/testnchar.js
@@ -0,0 +1,33 @@
+const taos = require('../tdengine');
+var conn = taos.connect({ host: "localhost" });
+var c1 = conn.cursor();
+
+
+function checkData(data, row, col, expect) {
+ let checkdata = data[row][col];
+ if (checkdata == expect) {
+ // console.log('check pass')
+ }
+ else {
+ console.log('check failed, expect ' + expect + ', but is ' + checkdata)
+ }
+}
+
+c1.execute('drop database if exists testnodejsnchar')
+c1.execute('create database testnodejsnchar')
+c1.execute('use testnodejsnchar');
+c1.execute('create table tb (ts timestamp, value float, text binary(200))')
+c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") -
+c1.execute('insert into tb values(1623254400150, 24.7, NULL);')
+c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");')
+sql = 'select * from tb;'
+
+console.log('*******************************************')
+
+c1.execute(sql);
+data = c1.fetchall();
+console.log(data)
+//check data about insert data
+checkData(data, 0, 2, '中文10000000000000000000000')
+checkData(data, 1, 2, null)
+checkData(data, 2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000')
\ No newline at end of file
diff --git a/src/connector/python/examples/demo.py b/src/connector/python/examples/demo.py
index 6c7c03f3e2c9630fab2af661d5c589066c21755f..3bc09046f3a33557e513425c06373c66958f2a2f 100644
--- a/src/connector/python/examples/demo.py
+++ b/src/connector/python/examples/demo.py
@@ -2,7 +2,7 @@ import taos
conn = taos.connect(host='127.0.0.1',
user='root',
- passworkd='taodata',
+ password='taosdata',
database='log')
cursor = conn.cursor()
diff --git a/src/connector/python/taos/bind.py b/src/connector/python/taos/bind.py
index ede6381628ae0fd5ff8794ef23db2f5afcfb5f3d..083ddc99aea8dc6f39b1f22ac5f77d2584a2fe69 100644
--- a/src/connector/python/taos/bind.py
+++ b/src/connector/python/taos/bind.py
@@ -10,7 +10,8 @@ import sys
_datetime_epoch = datetime.utcfromtimestamp(0)
def _is_not_none(obj):
- obj != None
+ return obj != None
+
class TaosBind(ctypes.Structure):
_fields_ = [
("buffer_type", c_int),
@@ -299,27 +300,14 @@ class TaosMultiBind(ctypes.Structure):
self.buffer = cast(buffer, c_void_p)
self.num = len(values)
- def binary(self, values):
+ def _str_to_buffer(self, values):
self.num = len(values)
- self.buffer = cast(c_char_p("".join(filter(_is_not_none, values)).encode("utf-8")), c_void_p)
- self.length = (c_int * len(values))(*[len(value) if value is not None else 0 for value in values])
- self.buffer_type = FieldType.C_BINARY
- self.is_null = cast((c_byte * self.num)(*[1 if v == None else 0 for v in values]), c_char_p)
-
- def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
- try:
- buffer = cast(values, c_void_p)
- except:
- buffer_type = c_int64 * len(values)
- buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])
-
- self.buffer_type = FieldType.C_TIMESTAMP
- self.buffer = cast(buffer, c_void_p)
- self.buffer_length = sizeof(c_int64)
- self.num = len(values)
-
- def nchar(self, values):
- # type: (list[str]) -> None
+ is_null = [1 if v == None else 0 for v in values]
+ self.is_null = cast((c_byte * self.num)(*is_null), c_char_p)
+
+ if sum(is_null) == self.num:
+ self.length = (c_int32 * len(values))(0 * self.num)
+ return
if sys.version_info < (3, 0):
_bytes = [bytes(value) if value is not None else None for value in values]
buffer_length = max(len(b) + 1 for b in _bytes if b is not None)
@@ -347,9 +335,26 @@ class TaosMultiBind(ctypes.Structure):
)
self.length = (c_int32 * len(values))(*[len(b) if b is not None else 0 for b in _bytes])
self.buffer_length = buffer_length
+ def binary(self, values):
+ self.buffer_type = FieldType.C_BINARY
+ self._str_to_buffer(values)
+
+ def timestamp(self, values, precision=PrecisionEnum.Milliseconds):
+ try:
+ buffer = cast(values, c_void_p)
+ except:
+ buffer_type = c_int64 * len(values)
+ buffer = buffer_type(*[_datetime_to_timestamp(value, precision) for value in values])
+
+ self.buffer_type = FieldType.C_TIMESTAMP
+ self.buffer = cast(buffer, c_void_p)
+ self.buffer_length = sizeof(c_int64)
self.num = len(values)
- self.is_null = cast((c_byte * self.num)(*[1 if v == None else 0 for v in values]), c_char_p)
+
+ def nchar(self, values):
+ # type: (list[str]) -> None
self.buffer_type = FieldType.C_NCHAR
+ self._str_to_buffer(values)
def tinyint_unsigned(self, values):
self.buffer_type = FieldType.C_TINYINT_UNSIGNED
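
The refactor folds the duplicated BINARY/NCHAR logic into _str_to_buffer: build the NULL byte mask first, then skip buffer construction entirely when every value is NULL. The mask step in isolation, as plain Python without the ctypes plumbing:

    def build_null_mask(values):
        # 1 marks a NULL slot, matching the (c_byte * num) array in bind.py.
        is_null = [1 if v is None else 0 for v in values]
        all_null = sum(is_null) == len(values)
        lengths = ([0] * len(values) if all_null else
                   [len(v.encode('utf-8')) if v is not None else 0 for v in values])
        return is_null, lengths, all_null

    print(build_null_mask(['abc', None]))  # ([0, 1], [3, 0], False)
    print(build_null_mask([None, None]))   # ([1, 1], [0, 0], True)
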
diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py
index 51e9a8667ddcab3d5d67da8be429f460f33d9eed..c5737ea5a07b7678e058307dfe3b47546dd99909 100644
--- a/src/connector/python/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -49,7 +49,7 @@ def _load_taos():
try:
return load_func[platform.system()]()
except:
- sys.exit("unsupported platform to TDengine connector")
+ raise InterfaceError('unsupported platform or failed to load taos client library')
_libtaos = _load_taos()
@@ -102,9 +102,7 @@ _libtaos.taos_get_client_info.restype = c_char_p
def taos_get_client_info():
# type: () -> str
- """Get client version info.
- 获取客户端版本信息。
- """
+ """Get client version info."""
return _libtaos.taos_get_client_info().decode()
@@ -114,6 +112,7 @@ _libtaos.taos_get_server_info.argtypes = (c_void_p,)
def taos_get_server_info(connection):
# type: (c_void_p) -> str
+ """Get server version as string."""
return _libtaos.taos_get_server_info(connection).decode()
@@ -134,11 +133,10 @@ _libtaos.taos_connect.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint1
def taos_connect(host=None, user="root", password="taosdata", db=None, port=0):
# type: (None|str, str, str, None|str, int) -> c_void_p
"""Create TDengine database connection.
- 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含:
- - host: server hostname/FQDN, TDengine管理主节点的FQDN
- - user: user name/用户名
- - password: user password / 用户密码
+ - host: server hostname/FQDN
+ - user: user name
+ - password: user password
- db: database name (optional)
- port: server port
@@ -187,11 +185,10 @@ _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_
def taos_connect_auth(host=None, user="root", auth="", db=None, port=0):
# type: (None|str, str, str, None|str, int) -> c_void_p
- """
- 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含:
+ """Connect server with auth token.
- - host: server hostname/FQDN, TDengine管理主节点的FQDN
- - user: user name/用户名
+ - host: server hostname/FQDN
+ - user: user name
- auth: base64 encoded auth token
- db: database name (optional)
- port: server port
@@ -830,6 +827,22 @@ def taos_insert_lines(connection, lines):
if errno != 0:
raise LinesError("insert lines error", errno)
+def taos_insert_telnet_lines(connection, lines):
+ # type: (c_void_p, list[str] | tuple(str)) -> None
+ num_of_lines = len(lines)
+ lines = (c_char_p(line.encode("utf-8")) for line in lines)
+ lines_type = ctypes.c_char_p * num_of_lines
+ p_lines = lines_type(*lines)
+ errno = _libtaos.taos_insert_telnet_lines(connection, p_lines, num_of_lines)
+ if errno != 0:
+ raise TelnetLinesError("insert telnet lines error", errno)
+
+def taos_insert_json_payload(connection, payload):
+    # type: (c_void_p, str) -> None
+ payload = payload.encode("utf-8")
+ errno = _libtaos.taos_insert_json_payload(connection, payload)
+ if errno != 0:
+ raise JsonPayloadError("insert json payload error", errno)
class CTaosInterface(object):
def __init__(self, config=None):
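
Both new wrappers marshal Python strings exactly the way taos_insert_lines already does: encode each line to UTF-8, then pack the pointers into a c_char_p array sized to the line count. The marshalling step in isolation (no library call):

    import ctypes

    lines = ['cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0"']
    num_of_lines = len(lines)
    encoded = (ctypes.c_char_p(line.encode('utf-8')) for line in lines)
    p_lines = (ctypes.c_char_p * num_of_lines)(*encoded)
    # p_lines and num_of_lines are the arguments handed to
    # _libtaos.taos_insert_telnet_lines(connection, p_lines, num_of_lines).
    print(p_lines[0])
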
diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py
index 7857c8c706dbe27fd9440e6bf2eb698b6822650e..35aca1fb26c1e612c3b3f6b1d8c794495bed0035 100644
--- a/src/connector/python/taos/connection.py
+++ b/src/connector/python/taos/connection.py
@@ -145,6 +145,34 @@ class TaosConnection(object):
"""
return taos_insert_lines(self._conn, lines)
+ def insert_telnet_lines(self, lines):
+ """OpenTSDB telnet style API format support
+
+ ## Example
+ cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0",interface="eth0"
+
+ """
+ return taos_insert_telnet_lines(self._conn, lines)
+
+ def insert_json_payload(self, payload):
+ """OpenTSDB HTTP JSON format support
+
+ ## Example
+ "{
+ "metric": "cpu_load_0",
+ "timestamp": 1626006833610123,
+ "value": 55.5,
+ "tags":
+ {
+ "host": "ubuntu",
+ "interface": "eth0",
+ "Id": "tb0"
+ }
+ }"
+
+ """
+ return taos_insert_json_payload(self._conn, payload)
+
def cursor(self):
# type: () -> TaosCursor
"""Return a new Cursor object using the connection."""
diff --git a/src/connector/python/taos/constants.py b/src/connector/python/taos/constants.py
index b500df627c22919e7aab964504ccbe50c573c1c5..8ad5b69fc099718fa4f4b8c08cf689b17663eae0 100644
--- a/src/connector/python/taos/constants.py
+++ b/src/connector/python/taos/constants.py
@@ -3,6 +3,9 @@
"""Constants in TDengine python
"""
+import ctypes, struct
+
+
class FieldType(object):
"""TDengine Field Types"""
@@ -33,8 +36,8 @@ class FieldType(object):
C_INT_UNSIGNED_NULL = 4294967295
C_BIGINT_NULL = -9223372036854775808
C_BIGINT_UNSIGNED_NULL = 18446744073709551615
- C_FLOAT_NULL = float("nan")
- C_DOUBLE_NULL = float("nan")
+    C_FLOAT_NULL = ctypes.c_float(struct.unpack("<f", b"\x00\x00\xf0\x7f")[0])
+    C_DOUBLE_NULL = ctypes.c_double(struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xf0\x7f")[0])
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
--- a/src/kit/shell/src/shellDarwin.c
+++ b/src/kit/shell/src/shellDarwin.c
@@ ... @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
     // for password
-    else if (strcmp(argv[i], "-p") == 0) {
-      arguments->is_use_passwd = true;
+ else if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ strcpy(tsOsName, "Darwin");
+ printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", g_password) > 1) {
+ fprintf(stderr, "password read error\n");
+ }
+ taosSetConsoleEcho(true);
+ getchar();
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ }
+ arguments->password = g_password;
+ arguments->is_use_passwd = true;
+ strcpy(argv[i], "");
+ argc -= 1;
}
- // for management port
+ // for management port
else if (strcmp(argv[i], "-P") == 0) {
if (i < argc - 1) {
arguments->port = atoi(argv[++i]);
@@ -98,7 +120,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
- if (i < argc - 1) {
+ if (i < argc - 1) {
if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1);
exit(EXIT_FAILURE);
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index cdce61e578de729e09bd30e893b5f4e97a6128d8..d30b868bd59d8891db06f78e80cad8cb8eca10be 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -44,6 +44,13 @@ char PROMPT_HEADER[] = "tq> ";
char CONTINUE_PROMPT[] = " -> ";
int prompt_size = 4;
+#elif (_TD_PRO_ == true)
+char CLIENT_VERSION[] = "Welcome to the ProDB shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by Hanatech, Inc. All rights reserved.\n\n";
+char PROMPT_HEADER[] = "ProDB> ";
+
+char CONTINUE_PROMPT[] = " -> ";
+int prompt_size = 7;
#else
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
@@ -65,7 +72,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
*/
TAOS *shellInit(SShellArguments *_args) {
printf("\n");
- printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
+ if (!_args->is_use_passwd) {
+#ifdef TD_WINDOWS
+ strcpy(tsOsName, "Windows");
+#elif defined(TD_DARWIN)
+ strcpy(tsOsName, "Darwin");
+#endif
+ printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
+ }
+
fflush(stdout);
// set options before initializing
@@ -73,9 +88,7 @@ TAOS *shellInit(SShellArguments *_args) {
taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
- if (_args->is_use_passwd) {
- if (_args->password == NULL) _args->password = getpass("Enter password: ");
- } else {
+ if (!_args->is_use_passwd) {
_args->password = TSDB_DEFAULT_PASS;
}
@@ -169,7 +182,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) {
system("clear");
return 0;
}
-
+
if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
strtok(command, " \t");
strtok(NULL, " \t");
@@ -181,7 +194,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) {
}
return 0;
}
-
+
if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
/* If source file. */
char *c_ptr = strtok(command, " ;");
@@ -246,10 +259,14 @@ int32_t shellRunCommand(TAOS* con, char* command) {
esc = false;
continue;
}
-
+
if (c == '\\') {
- esc = true;
- continue;
+ if (quote != 0 && (*command == '_' || *command == '\\')) {
+ //DO nothing
+ } else {
+ esc = true;
+ continue;
+ }
}
if (quote == c) {
@@ -335,8 +352,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
}
if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands
- int error_no = 0;
-
+ int error_no = 0;
+
int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode);
if (numOfRows < 0) {
atomic_store_64(&result, 0);
@@ -529,7 +546,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) {
fprintf(fp, "%s", fields[col].name);
}
fputc('\n', fp);
-
+
int numOfRows = 0;
do {
int32_t* length = taos_fetch_lengths(tres);
@@ -559,7 +576,7 @@ static void shellPrintNChar(const char *str, int length, int width) {
while (pos < length) {
wchar_t wc;
int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX);
- if (bytes == 0) {
+ if (bytes <= 0) {
break;
}
pos += bytes;
@@ -715,7 +732,7 @@ static int verticalPrintResult(TAOS_RES* tres) {
int numOfRows = 0;
int showMore = 1;
- do {
+ do {
if (numOfRows < resShowMaxNum) {
printf("*************************** %d.row ***************************\n", numOfRows + 1);
@@ -850,7 +867,7 @@ static int horizontalPrintResult(TAOS_RES* tres) {
int numOfRows = 0;
int showMore = 1;
-
+
do {
int32_t* length = taos_fetch_lengths(tres);
if (numOfRows < resShowMaxNum) {
@@ -866,7 +883,7 @@ static int horizontalPrintResult(TAOS_RES* tres) {
printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n");
showMore = 0;
}
-
+
numOfRows++;
row = taos_fetch_row(tres);
} while(row != NULL);
@@ -908,7 +925,7 @@ void read_history() {
if (errno != ENOENT) {
fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
}
-#endif
+#endif
return;
}
@@ -933,9 +950,9 @@ void write_history() {
FILE *f = fopen(f_history, "w");
if (f == NULL) {
-#ifndef WINDOWS
+#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
-#endif
+#endif
return;
}
@@ -981,13 +998,13 @@ void source_file(TAOS *con, char *fptr) {
/*
if (access(fname, F_OK) != 0) {
fprintf(stderr, "ERROR: file %s is not exist\n", fptr);
-
+
wordfree(&full_path);
free(cmd);
return;
}
*/
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index dc74f6fcaa152c547d734ed4e186b45b94ce8de5..93783b205560604c9d25c9f5dc2e73a239a67b8e 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -34,7 +34,7 @@ static char doc[] = "";
static char args_doc[] = "";
static struct argp_option options[] = {
{"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."},
- {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
+ {"password", 'p', 0, 0, "The password to use when connecting to the server."},
{"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."},
{"user", 'u', "USER", 0, "The user name to use when connecting to the server."},
{"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."},
@@ -47,9 +47,11 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"check", 'k', "CHECK", 0, "Check tables."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
- {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
- {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync."},
+ {"timezone", 'z', "TIMEZONE", 0, "Time zone of the shell, default is local."},
+ {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speen|fqdn."},
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
+ {"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."},
+ {"pkttype", 'S', "PKTTYPE", 0, "Packet type used for net test, default is TCP."},
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
@@ -63,8 +65,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->host = arg;
break;
case 'p':
- arguments->is_use_passwd = true;
- if (arg) arguments->password = arg;
break;
case 'P':
if (arg) {
@@ -76,7 +76,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
}
break;
- case 't':
+ case 'z':
arguments->timezone = arg;
break;
case 'u':
@@ -108,7 +108,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->is_raw_time = true;
break;
case 'f':
- if (wordexp(arg, &full_path, 0) != 0) {
+ if ((0 == strlen(arg)) || (wordexp(arg, &full_path, 0) != 0)) {
fprintf(stderr, "Invalid path %s\n", arg);
return -1;
}
@@ -148,6 +148,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return -1;
}
break;
+ case 'N':
+ if (arg) {
+ arguments->pktNum = atoi(arg);
+ } else {
+ fprintf(stderr, "Invalid packet number\n");
+ return -1;
+ }
+ break;
+ case 'S':
+ arguments->pktType = arg;
+ break;
case OPT_ABORT:
arguments->abort = 1;
break;
@@ -160,12 +171,48 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
/* Our argp parser. */
static struct argp argp = {options, parse_opt, args_doc, doc};
+char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
+char g_password[SHELL_MAX_PASSWORD_LEN];
+
+static void parse_args(
+ int argc, char *argv[], SShellArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ strcpy(tsOsName, "Linux");
+ printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%20s", g_password) > 1) {
+ fprintf(stderr, "password reading error\n");
+ }
+ taosSetConsoleEcho(true);
+ if (EOF == getchar()) {
+ fprintf(stderr, "getchar() return EOF\n");
+ }
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ strcpy(argv[i], "-p");
+ }
+ arguments->password = g_password;
+ arguments->is_use_passwd = true;
+ }
+ }
+}
+
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
static char verType[32] = {0};
sprintf(verType, "version: %s\n", version);
argp_program_version = verType;
-
+
+ if (argc > 1) {
+ parse_args(argc, argv, arguments);
+ }
+
argp_parse(&argp, argc, argv, 0, 0, arguments);
if (arguments->abort) {
#ifndef _ALPINE
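
parse_args pre-scans argv before argp runs: a bare -p or --password prompts with terminal echo disabled, while -pXXXX consumes the inline value and rewrites the argument so the secret never reaches argp or the process list. The same pattern sketched in Python, purely as an analogue to the C flow:

    import getpass

    def prescan_password(argv):
        # Bare -p / --password: prompt with echo off; -pXXXX: take inline value.
        for arg in argv[1:]:
            if arg in ('-p', '--password'):
                return getpass.getpass('Enter password: ')
            if arg.startswith('-p') and not arg.startswith('--') and len(arg) > 2:
                return arg[2:]
        return None

    print(prescan_password(['taos', '-psecret']))  # secret
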
diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c
index 0c70386061b99baaf2f9448ddadbb250685f23d4..5c9dc0995dacecebd10b7f2b77e216ca97157db0 100644
--- a/src/kit/shell/src/shellMain.c
+++ b/src/kit/shell/src/shellMain.c
@@ -71,7 +71,9 @@ int checkVersion() {
// Global configurations
SShellArguments args = {
.host = NULL,
+#ifndef TD_WINDOWS
.password = NULL,
+#endif
.user = NULL,
.database = NULL,
.timezone = NULL,
@@ -83,6 +85,8 @@ SShellArguments args = {
.threadNum = 5,
.commands = NULL,
.pktLen = 1000,
+ .pktNum = 100,
+ .pktType = "TCP",
.netTestRole = NULL
};
@@ -116,7 +120,7 @@ int main(int argc, char* argv[]) {
printf("Failed to init taos");
exit(EXIT_FAILURE);
}
- taosNetTest(args.netTestRole, args.host, args.port, args.pktLen);
+ taosNetTest(args.netTestRole, args.host, args.port, args.pktLen, args.pktNum, args.pktType);
exit(0);
}
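
With the widened taosNetTest signature the new shell flags feed straight through; for example, a network speed test with larger packets might be launched as: taos -n speed -l 1024 -N 1000 -S UDP, where -N (packet count) and -S (packet type) behave as documented in the option tables above.
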
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index 87d11a3516a65e83201bf4ebe51f07e5394d5cdf..cb707d9331d3f1f87227e5096b6d7f047d350ebf 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -19,6 +19,9 @@
extern char configDir[];
+char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
+ "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
+
void printVersion() {
printf("version: %s\n", version);
}
@@ -52,15 +55,21 @@ void printHelp() {
printf("%s%s\n", indent, "-t");
printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local.");
printf("%s%s\n", indent, "-n");
- printf("%s%s%s\n", indent, indent, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync.");
+ printf("%s%s%s\n", indent, indent, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speed|fqdn.");
printf("%s%s\n", indent, "-l");
printf("%s%s%s\n", indent, indent, "Packet length used for net test, default is 1000 bytes.");
+ printf("%s%s\n", indent, "-N");
+ printf("%s%s%s\n", indent, indent, "Packet numbers used for net test, default is 100.");
+ printf("%s%s\n", indent, "-S");
+ printf("%s%s%s\n", indent, indent, "Packet type used for net test, default is TCP.");
printf("%s%s\n", indent, "-V");
printf("%s%s%s\n", indent, indent, "Print program version.");
exit(EXIT_SUCCESS);
}
+char g_password[SHELL_MAX_PASSWORD_LEN];
+
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) {
// for host
@@ -73,11 +82,26 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
}
}
// for password
- else if (strcmp(argv[i], "-p") == 0) {
- arguments->is_use_passwd = true;
- if (i < argc - 1 && argv[i + 1][0] != '-') {
- arguments->password = argv[++i];
- }
+ else if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ arguments->is_use_passwd = true;
+ strcpy(tsOsName, "Windows");
+ printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info());
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", g_password) > 1) {
+ fprintf(stderr, "password read error!\n");
+ }
+ taosSetConsoleEcho(true);
+ getchar();
+ } else {
+ tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
+ }
+ arguments->password = g_password;
+ strcpy(argv[i], "");
+ argc -= 1;
}
// for management port
else if (strcmp(argv[i], "-P") == 0) {
@@ -104,7 +128,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[i], "-c") == 0) {
- if (i < argc - 1) {
+ if (i < argc - 1) {
char *tmp = argv[++i];
if (strlen(tmp) >= TSDB_FILENAME_LEN) {
fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1);
@@ -170,6 +194,22 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
exit(EXIT_FAILURE);
}
}
+ else if (strcmp(argv[i], "-N") == 0) {
+ if (i < argc - 1) {
+ arguments->pktNum = atoi(argv[++i]);
+ } else {
+ fprintf(stderr, "option -N requires an argument\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+ else if (strcmp(argv[i], "-S") == 0) {
+ if (i < argc - 1) {
+ arguments->pktType = argv[++i];
+ } else {
+ fprintf(stderr, "option -S requires an argument\n");
+ exit(EXIT_FAILURE);
+ }
+ }
else if (strcmp(argv[i], "-V") == 0) {
printVersion();
exit(EXIT_SUCCESS);
@@ -265,7 +305,7 @@ void *shellLoopQuery(void *arg) {
if (command == NULL) return NULL;
int32_t err = 0;
-
+
do {
memset(command, 0, MAX_COMMAND_SIZE);
shellPrintPrompt();
@@ -274,7 +314,7 @@ void *shellLoopQuery(void *arg) {
err = shellReadCommand(con, command);
if (err) {
break;
- }
+ }
} while (shellRunCommand(con, command) == 0);
return NULL;
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 3b91de32b043fc5233fb589e8ff7f349aa01171c..5adf9f342a41f9b3886c9c9654e0ef9dd7571c54 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -20,6 +20,7 @@
#include
#include
+#include
#define _GNU_SOURCE
#define CURL_STATICLIB
@@ -53,14 +54,6 @@
#include "taoserror.h"
#include "tutil.h"
-#define STMT_IFACE_ENABLED 1
-#define NANO_SECOND_ENABLED 1
-#define SET_THREADNAME_ENABLED 1
-
-#if SET_THREADNAME_ENABLED == 0
-#define setThreadName(name)
-#endif
-
#define REQ_EXTRA_BUF_LEN 1024
#define RESP_BUF_LEN 4096
@@ -77,13 +70,14 @@ extern char configDir[];
#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS)
#define MAX_USERNAME_SIZE 64
-#define MAX_PASSWORD_SIZE 16
#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
+#define DEFAULT_START_TIME 1500000000000
+
#define MAX_PREPARED_RAND 1000000
#define INT_BUFF_LEN 11
#define BIGINT_BUFF_LEN 21
@@ -94,7 +88,7 @@ extern char configDir[];
#define DOUBLE_BUFF_LEN 42
#define TIMESTAMP_BUFF_LEN 21
-#define MAX_SAMPLES_ONCE_FROM_FILE 10000
+#define MAX_SAMPLES 10000
#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp
#define MAX_DB_COUNT 8
@@ -110,8 +104,19 @@ extern char configDir[];
#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3)
#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
+#define DEFAULT_NTHREADS 8
#define DEFAULT_TIMESTAMP_STEP 1
+#define DEFAULT_INTERLACE_ROWS 0
+#define DEFAULT_DATATYPE_NUM 1
+#define DEFAULT_CHILDTABLES 10000
+#define STMT_BIND_PARAM_BATCH 1
+
+char* g_sampleDataBuf = NULL;
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+char* g_sampleBindBatchArray = NULL;
+#endif
enum TEST_MODE {
INSERT_TEST, // 0
@@ -120,17 +125,17 @@ enum TEST_MODE {
INVAID_TEST
};
-typedef enum CREATE_SUB_TALBE_MOD_EN {
+typedef enum CREATE_SUB_TABLE_MOD_EN {
PRE_CREATE_SUBTBL,
AUTO_CREATE_SUBTBL,
NO_CREATE_SUBTBL
-} CREATE_SUB_TALBE_MOD_EN;
+} CREATE_SUB_TABLE_MOD_EN;
-typedef enum TALBE_EXISTS_EN {
+typedef enum TABLE_EXISTS_EN {
TBL_NO_EXISTS,
TBL_ALREADY_EXISTS,
TBL_EXISTS_BUTT
-} TALBE_EXISTS_EN;
+} TABLE_EXISTS_EN;
enum enumSYNC_MODE {
SYNC_MODE,
@@ -210,41 +215,43 @@ enum _describe_table_index {
static char *g_dupstr = NULL;
typedef struct SArguments_S {
- char * metaFile;
+ char *metaFile;
uint32_t test_mode;
- char * host;
+ char *host;
uint16_t port;
uint16_t iface;
char * user;
- char password[MAX_PASSWORD_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
char * database;
int replica;
char * tb_prefix;
char * sqlFile;
bool use_metric;
bool drop_database;
- bool insert_only;
+ bool aggr_func;
bool answer_yes;
bool debug_print;
bool verbose_print;
bool performance_print;
char * output_file;
bool async_mode;
- char * datatype[MAX_NUM_COLUMNS + 1];
- uint32_t len_of_binary;
- uint32_t num_of_CPR;
- uint32_t num_of_threads;
+ char data_type[MAX_NUM_COLUMNS+1];
+ char *dataType[MAX_NUM_COLUMNS+1];
+ uint32_t binwidth;
+ uint32_t columnCount;
+ uint64_t lenOfOneRow;
+ uint32_t nthreads;
uint64_t insert_interval;
uint64_t timestamp_step;
int64_t query_times;
- uint32_t interlace_rows;
- uint32_t num_of_RPR; // num_of_records_per_req
+ uint32_t interlaceRows;
+ uint32_t reqPerReq; // num_of_records_per_req
uint64_t max_sql_len;
- int64_t num_of_tables;
- int64_t num_of_DPT;
+ int64_t ntables;
+ int64_t insertRows;
int abort;
uint32_t disorderRatio; // 0: no disorder, >0: x%
- int disorderRange; // ms, us or ns. accordig to database precision
+ int disorderRange; // ms, us or ns. according to database precision
uint32_t method_of_delete;
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
@@ -252,14 +259,15 @@ typedef struct SArguments_S {
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN];
- char dataType[DATATYPE_BUFF_LEN];
- uint32_t dataLen;
- char note[NOTE_BUFF_LEN];
+ char field[TSDB_COL_NAME_LEN];
+ char data_type;
+ char dataType[DATATYPE_BUFF_LEN];
+ uint32_t dataLen;
+ char note[NOTE_BUFF_LEN];
} StrColumn;
typedef struct SSuperTable_S {
- char sTblName[TSDB_TABLE_NAME_LEN];
+ char stbName[TSDB_TABLE_NAME_LEN];
char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample
char childTblPrefix[TBNAME_PREFIX_LEN];
uint16_t childTblExists;
@@ -295,17 +303,16 @@ typedef struct SSuperTable_S {
uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
-#if STMT_IFACE_ENABLED == 1
- char* sampleBindArray;
-#endif
- //int sampleRowCount;
- //int sampleUsePos;
uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
uint32_t tagSampleCount;
uint32_t tagUsePos;
+#if STMT_BIND_PARAM_BATCH == 1
+ // bind param batch
+ char *sampleBindBatchArray;
+#endif
// statistics
uint64_t totalInsertRows;
uint64_t totalAffectedRows;
@@ -366,15 +373,14 @@ typedef struct SDbs_S {
uint16_t port;
char user[MAX_USERNAME_SIZE];
- char password[MAX_PASSWORD_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
- bool insert_only;
- bool do_aggreFunc;
+ bool aggr_func;
bool asyncMode;
uint32_t threadCount;
- uint32_t threadCountByCreateTbl;
+ uint32_t threadCountForCreateTbl;
uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
@@ -385,7 +391,7 @@ typedef struct SDbs_S {
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t concurrent;
int sqlCount;
uint32_t asyncMode; // 0: sync, 1: async
@@ -405,8 +411,8 @@ typedef struct SpecifiedQueryInfo_S {
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
- char sTblName[TSDB_TABLE_NAME_LEN];
- uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ char stbName[TSDB_TABLE_NAME_LEN];
+ uint64_t queryInterval; // 0: unlimited > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
@@ -432,7 +438,7 @@ typedef struct SQueryMetaInfo_S {
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
- char password[MAX_PASSWORD_SIZE];
+ char password[SHELL_MAX_PASSWORD_LEN];
char dbName[TSDB_DB_NAME_LEN];
char queryMode[SMALL_BUFF_LEN]; // taosc, rest
@@ -445,6 +451,15 @@ typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT *stmt;
int64_t *bind_ts;
+
+#if STMT_BIND_PARAM_BATCH == 1
+ int64_t *bind_ts_array;
+ char *bindParams;
+ char *is_null;
+#else
+ char* sampleBindArray;
+#endif
+
int threadID;
char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
@@ -454,6 +469,7 @@ typedef struct SThreadInfo_S {
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
+ int64_t tables_created;
uint64_t data_of_rate;
int64_t start_time;
char* cols;
@@ -589,54 +605,61 @@ char *g_rand_current_buff = NULL;
char *g_rand_phase_buff = NULL;
char *g_randdouble_buff = NULL;
-char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)",
- "max(col0)", "min(col0)", "first(col0)", "last(col0)"};
+char *g_aggreFuncDemo[] = {"*", "count(*)", "avg(current)", "sum(current)",
+ "max(current)", "min(current)", "first(current)", "last(current)"};
-#define DEFAULT_DATATYPE_NUM 3
+char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)",
+ "max(C0)", "min(C0)", "first(C0)", "last(C0)"};
SArguments g_args = {
- NULL, // metaFile
- 0, // test_mode
- "127.0.0.1", // host
- 6030, // port
- INTERFACE_BUT, // iface
- "root", // user
+ NULL, // metaFile
+ 0, // test_mode
+ "localhost", // host
+ 6030, // port
+ INTERFACE_BUT, // iface
+ "root", // user
#ifdef _TD_POWER_
"powerdb", // password
#elif (_TD_TQ_ == true)
- "tqueue", // password
+ "tqueue", // password
+#elif (_TD_PRO_ == true)
+ "prodb", // password
#else
- "taosdata", // password
+ "taosdata", // password
#endif
- "test", // database
- 1, // replica
+ "test", // database
+ 1, // replica
"d", // tb_prefix
NULL, // sqlFile
true, // use_metric
true, // drop_database
- true, // insert_only
+ false, // aggr_func
false, // debug_print
false, // verbose_print
false, // performance statistic print
false, // answer_yes;
"./output.txt", // output_file
0, // mode : sync or async
+ {TSDB_DATA_TYPE_FLOAT,
+ TSDB_DATA_TYPE_INT,
+ TSDB_DATA_TYPE_FLOAT},
{
- "FLOAT", // datatype
- "INT", // datatype
- "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3
+ "FLOAT", // dataType
+ "INT", // dataType
+ "FLOAT", // dataType. demo mode has 3 columns
},
- 16, // len_of_binary
- 4, // num_of_CPR
- 10, // num_of_connections/thread
+ 64, // binwidth
+ 4, // columnCount, timestamp + float + int + float
+ 20 + FLOAT_BUFF_LEN + INT_BUFF_LEN + FLOAT_BUFF_LEN, // lenOfOneRow
+ DEFAULT_NTHREADS,// nthreads
0, // insert_interval
DEFAULT_TIMESTAMP_STEP, // timestamp_step
1, // query_times
- 0, // interlace_rows;
- 30000, // num_of_RPR
+ DEFAULT_INTERLACE_ROWS, // interlaceRows;
+ 30000, // reqPerReq
(1024*1024), // max_sql_len
- 10000, // num_of_tables
- 10000, // num_of_DPT
+ DEFAULT_CHILDTABLES, // ntables
+ 10000, // insertRows
0, // abort
0, // disorderRatio
1000, // disorderRange
@@ -646,10 +669,9 @@ SArguments g_args = {
true, // demo_mode;
};
-
-
static SDbs g_Dbs;
-static int64_t g_totalChildTables = 0;
+static int64_t g_totalChildTables = DEFAULT_CHILDTABLES;
+static int64_t g_actualChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
@@ -670,7 +692,28 @@ static FILE * g_fpOfInsertResult = NULL;
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
- do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0)
+ do {\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\
+ fprintf(stderr, " \033[0m");\
+ } while(0)
+
+#define errorPrint2(fmt, ...) \
+ do {\
+ struct tm Tm, *ptm;\
+ struct timeval timeSecs; \
+ time_t curTime;\
+ gettimeofday(&timeSecs, NULL); \
+ curTime = timeSecs.tv_sec;\
+ ptm = localtime_r(&curTime, &Tm);\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "%02d/%02d %02d:%02d:%02d.%06d %08" PRId64 " ",\
+ ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\
+ ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\
+ taosGetSelfPthreadId());\
+ fprintf(stderr, " \033[0m");\
+ errorPrint(fmt, __VA_ARGS__);\
+ } while(0)
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
@@ -698,101 +741,113 @@ static void printVersion() {
char taosdemo_status[] = TAOSDEMO_STATUS;
if (strlen(taosdemo_status) == 0) {
- printf("taosdemo verison %s-%s\n",
+ printf("taosdemo version %s-%s\n",
tdengine_ver, taosdemo_ver);
} else {
- printf("taosdemo verison %s-%s, status:%s\n",
+ printf("taosdemo version %s-%s, status:%s\n",
tdengine_ver, taosdemo_ver, taosdemo_status);
}
}
static void printHelp() {
- char indent[10] = " ";
- printf("%s%s%s%s\n", indent, "-f", indent,
- "The meta file to the execution procedure. Default is './meta.json'.");
- printf("%s%s%s%s\n", indent, "-u", indent,
- "The TDengine user name to use when connecting to the server. Default is 'root'.");
+ char indent[10] = " ";
+ printf("%s\n\n", "Usage: taosdemo [OPTION...]");
+ printf("%s%s%s%s\n", indent, "-f, --file=FILE", "\t\t",
+ "The meta file to the execution procedure.");
+ printf("%s%s%s%s\n", indent, "-u, --user=USER", "\t\t",
+ "The user name to use when connecting to the server.");
#ifdef _TD_POWER_
- printf("%s%s%s%s\n", indent, "-p", indent,
- "The password to use when connecting to the server. Default is 'powerdb'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/power/'.");
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server. By default is 'powerdb'");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory. By default is '/etc/power/'.");
#elif (_TD_TQ_ == true)
- printf("%s%s%s%s\n", indent, "-p", indent,
- "The password to use when connecting to the server. Default is 'tqueue'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/tq/'.");
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server. By default is 'tqueue'");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory. By default is '/etc/tq/'.");
+#elif (_TD_PRO_ == true)
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server. By default is 'prodb'");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory. By default is '/etc/ProDB/'.");
#else
- printf("%s%s%s%s\n", indent, "-p", indent,
- "The password to use when connecting to the server. Default is 'taosdata'.");
- printf("%s%s%s%s\n", indent, "-c", indent,
- "Configuration directory. Default is '/etc/taos/'.");
+ printf("%s%s%s%s\n", indent, "-p, --password", "\t\t",
+ "The password to use when connecting to the server.");
+ printf("%s%s%s%s\n", indent, "-c, --config-dir=CONFIG_DIR", "\t",
+ "Configuration directory.");
#endif
- printf("%s%s%s%s\n", indent, "-h", indent,
- "The host to connect to TDengine. Default is localhost.");
- printf("%s%s%s%s\n", indent, "-P", indent,
- "The TCP/IP port number to use for the connection. Default is 0.");
- printf("%s%s%s%s\n", indent, "-I", indent,
-#if STMT_IFACE_ENABLED == 1
- "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'.");
-#else
- "The interface (taosc, rest) taosdemo uses. Default is 'taosc'.");
-#endif
- printf("%s%s%s%s\n", indent, "-d", indent,
- "Destination database. Default is 'test'.");
- printf("%s%s%s%s\n", indent, "-a", indent,
- "Set the replica parameters of the database, Default 1, min: 1, max: 3.");
- printf("%s%s%s%s\n", indent, "-m", indent,
- "Table prefix name. Default is 'd'.");
- printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file.");
- printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag.");
- printf("%s%s%s%s\n", indent, "-o", indent,
- "Direct output to the named file. Default is './output.txt'.");
- printf("%s%s%s%s\n", indent, "-q", indent,
- "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC.");
- printf("%s%s%s%s\n", indent, "-b", indent,
- "The data_type of columns, default: FLOAT, INT, FLOAT.");
- printf("%s%s%s%s\n", indent, "-w", indent,
- "The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
- printf("%s%s%s%s%d%s%d\n", indent, "-l", indent,
- "The number of columns per record. Default is ",
+ printf("%s%s%s%s\n", indent, "-h, --host=HOST", "\t\t",
+ "TDengine server FQDN to connect. The default host is localhost.");
+ printf("%s%s%s%s\n", indent, "-P, --port=PORT", "\t\t",
+ "The TCP/IP port number to use for the connection.");
+ printf("%s%s%s%s\n", indent, "-I, --interface=INTERFACE", "\t",
+ "The interface (taosc, rest, and stmt) taosdemo uses. By default use 'taosc'.");
+ printf("%s%s%s%s\n", indent, "-d, --database=DATABASE", "\t",
+ "Destination database. By default is 'test'.");
+ printf("%s%s%s%s\n", indent, "-a, --replica=REPLICA", "\t\t",
+ "Set the replica parameters of the database, By default use 1, min: 1, max: 3.");
+ printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t",
+ "Table prefix name. By default use 'd'.");
+ printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t",
+ "The select sql file.");
+ printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag.");
+ printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t",
+ "Direct output to the named file. By default use './output.txt'.");
+ printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t",
+ "Query mode -- 0: SYNC, 1: ASYNC. By default use SYNC.");
+ printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t",
+ "The data_type of columns, By default use: FLOAT, INT, FLOAT.");
+ printf("%s%s%s%s%d\n", indent, "-w, --binwidth=WIDTH", "\t\t",
+ "The width of data_type 'BINARY' or 'NCHAR'. By default use ",
+ g_args.binwidth);
+ printf("%s%s%s%s%d%s%d\n", indent, "-l, --columns=COLUMNS", "\t\t",
+ "The number of columns per record. Demo mode by default is ",
DEFAULT_DATATYPE_NUM,
- ". Max values is ",
+ " (float, int, float). Max values is ",
MAX_NUM_COLUMNS);
printf("%s%s%s%s\n", indent, indent, indent,
- "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
- printf("%s%s%s%s\n", indent, "-T", indent,
- "The number of threads. Default is 10.");
- printf("%s%s%s%s\n", indent, "-i", indent,
- "The sleep time (ms) between insertion. Default is 0.");
- printf("%s%s%s%s%d.\n", indent, "-S", indent,
- "The timestamp step between insertion. Default is ",
+ "\t\t\t\tAll of the new column(s) type is INT. If use -b to specify column type, -l will be ignored.");
+ printf("%s%s%s%s%d.\n", indent, "-T, --threads=NUMBER", "\t\t",
+ "The number of threads. By default use ", DEFAULT_NTHREADS);
+ printf("%s%s%s%s\n", indent, "-i, --insert-interval=NUMBER", "\t",
+ "The sleep time (ms) between insertion. By default is 0.");
+ printf("%s%s%s%s%d.\n", indent, "-S, --time-step=TIME_STEP", "\t",
+ "The timestamp step between insertion. By default is ",
DEFAULT_TIMESTAMP_STEP);
- printf("%s%s%s%s\n", indent, "-r", indent,
- "The number of records per request. Default is 30000.");
- printf("%s%s%s%s\n", indent, "-t", indent,
- "The number of tables. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-n", indent,
- "The number of records per table. Default is 10000.");
- printf("%s%s%s%s\n", indent, "-M", indent,
+ printf("%s%s%s%s%d.\n", indent, "-B, --interlace-rows=NUMBER", "\t",
+ "The interlace rows of insertion. By default is ",
+ DEFAULT_INTERLACE_ROWS);
+ printf("%s%s%s%s\n", indent, "-r, --rec-per-req=NUMBER", "\t",
+ "The number of records per request. By default is 30000.");
+ printf("%s%s%s%s\n", indent, "-t, --tables=NUMBER", "\t\t",
+ "The number of tables. By default is 10000.");
+ printf("%s%s%s%s\n", indent, "-n, --records=NUMBER", "\t\t",
+ "The number of records per table. By default is 10000.");
+ printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t",
"The value of records generated are totally random.");
- printf("%s%s%s%s\n", indent, indent, indent,
- " The default is to simulate power equipment senario.");
- printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag.");
- printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt.");
- printf("%s%s%s%s\n", indent, "-O", indent,
- "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order.");
- printf("%s%s%s%s\n", indent, "-R", indent,
- "Out of order data's range, ms, default is 1000.");
- printf("%s%s%s%s\n", indent, "-g", indent,
+ printf("%s\n", "\t\t\t\tBy default to simulate power equipment scenario.");
+ printf("%s%s%s%s\n", indent, "-x, --aggr-func", "\t\t",
+ "Test aggregation functions after insertion.");
+ printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Input yes for prompt.");
+ printf("%s%s%s%s\n", indent, "-O, --disorder=NUMBER", "\t\t",
+ "Insert order mode--0: In order, 1 ~ 50: disorder ratio. By default is in order.");
+ printf("%s%s%s%s\n", indent, "-R, --disorder-range=NUMBER", "\t",
+ "Out of order data's range. Unit is ms. By default is 1000.");
+ printf("%s%s%s%s\n", indent, "-g, --debug", "\t\t\t",
"Print debug info.");
- printf("%s%s%s\n", indent, "-V, --version\t",
- "Print version info.");
- printf("%s%s%s%s\n", indent, "--help\t", indent,
- "Print command line arguments list info.");
+ printf("%s%s%s%s\n", indent, "-?, --help\t", "\t\t",
+ "Give this help list");
+ printf("%s%s%s%s\n", indent, " --usage\t", "\t\t",
+ "Give a short usage message");
+ printf("%s%s\n", indent, "-V, --version\t\t\tPrint program version.");
/* printf("%s%s%s%s\n", indent, "-D", indent,
"Delete database if exists. 0: no, 1: yes, default is 1");
*/
+ printf("\nMandatory or optional arguments to long options are also mandatory or optional\n\
+for any corresponding short options.\n\
+\n\
+Report bugs to .\n");
}
static bool isStringNumber(char *input)
@@ -810,113 +865,441 @@ static bool isStringNumber(char *input)
return true;
}
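+// argp-style diagnostics: errorWrongValue reports an invalid option value,
+// errorPrintReqArg a short option missing its argument, errorPrintReqArg2 a
+// non-numeric argument, and errorPrintReqArg3 a long option missing its
+// argument. All of them point the user to --help/--usage.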
+static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value)
+{
+ fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorUnrecognized(char *program, char *wrong_arg)
+{
+ fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires an argument -- '%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg2(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires a number argument '-%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg3(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option '%s' requires an argument\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
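+// Every option below is accepted in four spellings: "-x VAL", "-xVAL",
+// "--long-opt VAL" and "--long-opt=VAL". The strlen() comparisons tell the
+// attached-value forms apart from the detached ones.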
static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
- if (strcmp(argv[i], "-f") == 0) {
+ if ((0 == strncmp(argv[i], "-f", strlen("-f")))
+ || (0 == strncmp(argv[i], "--file", strlen("--file")))) {
arguments->demo_mode = false;
- arguments->metaFile = argv[++i];
- } else if (strcmp(argv[i], "-c") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-c need a valid path following!\n");
+
+ if (2 == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg(argv[0], "f");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "-f", strlen("-f"))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("-f"));
+ } else if (strlen("--file") == strlen(argv[i])) {
+ if (i+1 == argc) {
+ errorPrintReqArg3(argv[0], "--file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->metaFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) {
+ arguments->metaFile = (char *)(argv[i] + strlen("--file="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
- } else if (strcmp(argv[i], "-h") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-h need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-c", strlen("-c")))
+ || (0 == strncmp(argv[i], "--config-dir", strlen("--config-dir")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "c");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN);
+ } else if (strlen("--config-dir") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--config-dir");
+ exit(EXIT_FAILURE);
+ }
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
+ } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) {
+ tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->host = argv[++i];
- } else if (strcmp(argv[i], "-P") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-P need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-h", strlen("-h")))
+ || (0 == strncmp(argv[i], "--host", strlen("--host")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "h");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "-h", strlen("-h"))) {
+ arguments->host = (char *)(argv[i] + strlen("-h"));
+ } else if (strlen("--host") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--host");
+ exit(EXIT_FAILURE);
+ }
+ arguments->host = argv[++i];
+ } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) {
+ arguments->host = (char *)(argv[i] + strlen("--host="));
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->port = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-I") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-I need a valid string following!\n");
+ } else if (strcmp(argv[i], "-PP") == 0) {
+ arguments->performance_print = true;
+ } else if ((0 == strncmp(argv[i], "-P", strlen("-P")))
+ || (0 == strncmp(argv[i], "--port", strlen("--port")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "P");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "P");
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--port=", strlen("--port="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--port=")))) {
+ arguments->port = atoi((char *)(argv[i]+strlen("--port=")));
+ }
+ } else if (0 == strncmp(argv[i], "-P", strlen("-P"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-P")))) {
+ arguments->port = atoi((char *)(argv[i]+strlen("-P")));
+ }
+ } else if (strlen("--port") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--port");
+ exit(EXIT_FAILURE);
+ }
+ arguments->port = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- ++i;
- if (0 == strcasecmp(argv[i], "taosc")) {
- arguments->iface = TAOSC_IFACE;
- } else if (0 == strcasecmp(argv[i], "rest")) {
- arguments->iface = REST_IFACE;
-#if STMT_IFACE_ENABLED == 1
- } else if (0 == strcasecmp(argv[i], "stmt")) {
- arguments->iface = STMT_IFACE;
-#endif
+ } else if ((0 == strncmp(argv[i], "-I", strlen("-I")))
+ || (0 == strncmp(argv[i], "--interface", strlen("--interface")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "I");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
+ } else if (0 == strncmp(argv[i], "--interface=", strlen("--interface="))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("--interface=")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-I", strlen("-I"))) {
+ if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "-I",
+ (char *)(argv[i] + strlen("-I")));
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interface") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interface");
+ exit(EXIT_FAILURE);
+ }
+ if (0 == strcasecmp(argv[i+1], "taosc")) {
+ arguments->iface = TAOSC_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "rest")) {
+ arguments->iface = REST_IFACE;
+ } else if (0 == strcasecmp(argv[i+1], "stmt")) {
+ arguments->iface = STMT_IFACE;
+ } else {
+ errorWrongValue(argv[0], "--interface", argv[i+1]);
+ exit(EXIT_FAILURE);
+ }
+ i++;
} else {
- errorPrint("%s", "\n\t-I need a valid string following!\n");
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- } else if (strcmp(argv[i], "-u") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-u need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-u", strlen("-u")))
+ || (0 == strncmp(argv[i], "--user", strlen("--user")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "u");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else if (0 == strncmp(argv[i], "-u", strlen("-u"))) {
+ arguments->user = (char *)(argv[i++] + strlen("-u"));
+ } else if (0 == strncmp(argv[i], "--user=", strlen("--user="))) {
+ arguments->user = (char *)(argv[i++] + strlen("--user="));
+ } else if (strlen("--user") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--user");
+ exit(EXIT_FAILURE);
+ }
+ arguments->user = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->user = argv[++i];
- } else if (strncmp(argv[i], "-p", 2) == 0) {
- if (strlen(argv[i]) == 2) {
- printf("Enter password:");
- scanf("%s", arguments->password);
+ } else if ((0 == strncmp(argv[i], "-p", strlen("-p")))
+ || (0 == strcmp(argv[i], "--password"))) {
+ if ((strlen(argv[i]) == 2) || (0 == strcmp(argv[i], "--password"))) {
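+ // Prompt interactively and suppress terminal echo while the password
+ // is typed.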
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if (scanf("%s", arguments->password) > 1) {
+ fprintf(stderr, "password read error!\n");
+ }
+ taosSetConsoleEcho(true);
} else {
- tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE);
+ tstrncpy(arguments->password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
}
- } else if (strcmp(argv[i], "-o") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-o need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-o", strlen("-o")))
+ || (0 == strncmp(argv[i], "--output", strlen("--output")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else if (0 == strncmp(argv[i], "--output=", strlen("--output="))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("--output="));
+ } else if (0 == strncmp(argv[i], "-o", strlen("-o"))) {
+ arguments->output_file = (char *)(argv[i++] + strlen("-o"));
+ } else if (strlen("--output") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--output");
+ exit(EXIT_FAILURE);
+ }
+ arguments->output_file = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->output_file = argv[++i];
- } else if (strcmp(argv[i], "-s") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-s need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-s", strlen("-s")))
+ || (0 == strncmp(argv[i], "--sql-file", strlen("--sql-file")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "s");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file="));
+ } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) {
+ arguments->sqlFile = (char *)(argv[i++] + strlen("-s"));
+ } else if (strlen("--sql-file") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--sql-file");
+ exit(EXIT_FAILURE);
+ }
+ arguments->sqlFile = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->sqlFile = argv[++i];
- } else if (strcmp(argv[i], "-q") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n");
+ } else if ((0 == strncmp(argv[i], "-q", strlen("-q")))
+ || (0 == strncmp(argv[i], "--query-mode", strlen("--query-mode")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "q");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "q");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--query-mode=", strlen("--query-mode="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--query-mode=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--query-mode=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-q", strlen("-q"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-q")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("-q")));
+ } else {
+ errorPrintReqArg2(argv[0], "-q");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--query-mode") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--query-mode");
+ exit(EXIT_FAILURE);
+ }
+ arguments->async_mode = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->async_mode = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-T") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-T need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-T", strlen("-T")))
+ || (0 == strncmp(argv[i], "--threads", strlen("--threads")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "T");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "T");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--threads=", strlen("--threads="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--threads=")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("--threads=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-T", strlen("-T"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-T")))) {
+ arguments->nthreads = atoi((char *)(argv[i]+strlen("-T")));
+ } else {
+ errorPrintReqArg2(argv[0], "-T");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--threads") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--threads");
+ exit(EXIT_FAILURE);
+ }
+ arguments->nthreads = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_threads = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-i") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-i need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-i", strlen("-i")))
+ || (0 == strncmp(argv[i], "--insert-interval", strlen("--insert-interval")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "i");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "i");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--insert-interval=", strlen("--insert-interval="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--insert-interval=")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("--insert-interval=")));
+ } else {
+ errorPrintReqArg3(argv[0], "--insert-innterval");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-i", strlen("-i"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-i")))) {
+ arguments->insert_interval = atoi((char *)(argv[i]+strlen("-i")));
+ } else {
+ errorPrintReqArg3(argv[0], "-i");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--insert-interval")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--insert-interval");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insert_interval = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->insert_interval = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-S") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("\n\t%s%s", argv[i], " need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-S", strlen("-S")))
+ || (0 == strncmp(argv[i], "--time-step", strlen("--time-step")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "S");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "S");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--time-step=", strlen("--time-step="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--time-step=")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("--time-step=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-S", strlen("-S"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-S")))) {
+ arguments->async_mode = atoi((char *)(argv[i]+strlen("-S")));
+ } else {
+ errorPrintReqArg2(argv[0], "-S");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--time-step") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--time-step");
+ exit(EXIT_FAILURE);
+ }
+ arguments->timestamp_step = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->timestamp_step = atoi(argv[++i]);
} else if (strcmp(argv[i], "-qt") == 0) {
if ((argc == i+1)
|| (!isStringNumber(argv[i+1]))) {
@@ -925,98 +1308,308 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->query_times = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-B") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-B need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-B", strlen("-B")))
+ || (0 == strncmp(argv[i], "--interlace-rows", strlen("--interlace-rows")))) {
+ if (strlen("-B") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "B");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "B");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--interlace-rows=")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-B")))) {
+ arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B")));
+ } else {
+ errorPrintReqArg2(argv[0], "-B");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--interlace-rows")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--interlace-rows");
+ exit(EXIT_FAILURE);
+ }
+ arguments->interlaceRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->interlace_rows = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-r") == 0) {
- if ((argc == i+1)
- || (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-r need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-r", strlen("-r")))
+ || (0 == strncmp(argv[i], "--rec-per-req", 13))) {
+ if (strlen("-r") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "r");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "r");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--rec-per-req=", strlen("--rec-per-req="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--rec-per-req=")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("--rec-per-req=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-r", strlen("-r"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-r")))) {
+ arguments->reqPerReq = atoi((char *)(argv[i]+strlen("-r")));
+ } else {
+ errorPrintReqArg2(argv[0], "-r");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--rec-per-req")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--rec-per-req");
+ exit(EXIT_FAILURE);
+ }
+ arguments->reqPerReq = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_RPR = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-t") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-t need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-t", strlen("-t")))
+ || (0 == strncmp(argv[i], "--tables", strlen("--tables")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "t");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "t");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--tables=", strlen("--tables="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--tables=")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("--tables=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-t", strlen("-t"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-t")))) {
+ arguments->ntables = atoi((char *)(argv[i]+strlen("-t")));
+ } else {
+ errorPrintReqArg2(argv[0], "-t");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--tables") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--tables");
+ exit(EXIT_FAILURE);
+ }
+ arguments->ntables = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_tables = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-n") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-n need a number following!\n");
+
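+ // Keep the global child-table counter in sync with the -t/--tables value.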
+ g_totalChildTables = arguments->ntables;
+ } else if ((0 == strncmp(argv[i], "-n", strlen("-n")))
+ || (0 == strncmp(argv[i], "--records", strlen("--records")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "n");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "n");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--records=", strlen("--records="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--records=")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("--records=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-n", strlen("-n"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-n")))) {
+ arguments->insertRows = atoi((char *)(argv[i]+strlen("-n")));
+ } else {
+ errorPrintReqArg2(argv[0], "-n");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--records") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--records");
+ exit(EXIT_FAILURE);
+ }
+ arguments->insertRows = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->num_of_DPT = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-d") == 0) {
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-d need a valid string following!\n");
+ } else if ((0 == strncmp(argv[i], "-d", strlen("-d")))
+ || (0 == strncmp(argv[i], "--database", strlen("--database")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "d");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else if (0 == strncmp(argv[i], "--database=", strlen("--database="))) {
+ arguments->output_file = (char *)(argv[i] + strlen("--database="));
+ } else if (0 == strncmp(argv[i], "-d", strlen("-d"))) {
+ arguments->output_file = (char *)(argv[i] + strlen("-d"));
+ } else if (strlen("--database") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--database");
+ exit(EXIT_FAILURE);
+ }
+ arguments->database = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->database = argv[++i];
- } else if (strcmp(argv[i], "-l") == 0) {
+ } else if ((0 == strncmp(argv[i], "-l", strlen("-l")))
+ || (0 == strncmp(argv[i], "--columns", strlen("--columns")))) {
arguments->demo_mode = false;
- if (argc == i+1) {
- if (!isStringNumber(argv[i+1])) {
- printHelp();
- errorPrint("%s", "\n\t-l need a number following!\n");
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "l");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "l");
+ exit(EXIT_FAILURE);
+ }
+ arguments->columnCount = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--columns=", strlen("--columns="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--columns=")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("--columns=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-l", strlen("-l"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-l")))) {
+ arguments->columnCount = atoi((char *)(argv[i]+strlen("-l")));
+ } else {
+ errorPrintReqArg2(argv[0], "-l");
exit(EXIT_FAILURE);
}
+ } else if (strlen("--columns")== strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--columns");
+ exit(EXIT_FAILURE);
+ }
+ arguments->columnCount = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
}
- arguments->num_of_CPR = atoi(argv[++i]);
- if (arguments->num_of_CPR > MAX_NUM_COLUMNS) {
- printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS);
+ if (arguments->columnCount > MAX_NUM_COLUMNS) {
+ printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS);
prompt();
- arguments->num_of_CPR = MAX_NUM_COLUMNS;
+ arguments->columnCount = MAX_NUM_COLUMNS;
}
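+ // Columns beyond the default demo set are typed INT; the remaining
+ // slots are cleared to NULL / TSDB_DATA_TYPE_NULL.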
- for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) {
- arguments->datatype[col] = "INT";
+ for (int col = DEFAULT_DATATYPE_NUM; col < arguments->columnCount; col ++) {
+ arguments->dataType[col] = "INT";
+ arguments->data_type[col] = TSDB_DATA_TYPE_INT;
}
- for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
- arguments->datatype[col] = NULL;
+ for (int col = arguments->columnCount; col < MAX_NUM_COLUMNS; col++) {
+ arguments->dataType[col] = NULL;
+ arguments->data_type[col] = TSDB_DATA_TYPE_NULL;
}
- } else if (strcmp(argv[i], "-b") == 0) {
+ } else if ((0 == strncmp(argv[i], "-b", strlen("-b")))
+ || (0 == strncmp(argv[i], "--data-type", strlen("--data-type")))) {
arguments->demo_mode = false;
- if (argc == i+1) {
- printHelp();
- errorPrint("%s", "\n\t-b need valid string following!\n");
+
+ char *dataType;
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "b");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else if (0 == strncmp(argv[i], "--data-type=", strlen("--data-type="))) {
+ dataType = (char *)(argv[i] + strlen("--data-type="));
+ } else if (0 == strncmp(argv[i], "-b", strlen("-b"))) {
+ dataType = (char *)(argv[i] + strlen("-b"));
+ } else if (strlen("--data-type") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--data-type");
+ exit(EXIT_FAILURE);
+ }
+ dataType = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- ++i;
- if (strstr(argv[i], ",") == NULL) {
+
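+ // A single type name configures one column; a comma-separated list
+ // configures several, capped at MAX_NUM_COLUMNS.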
+ if (strstr(dataType, ",") == NULL) {
// only one col
- if (strcasecmp(argv[i], "INT")
- && strcasecmp(argv[i], "FLOAT")
- && strcasecmp(argv[i], "TINYINT")
- && strcasecmp(argv[i], "BOOL")
- && strcasecmp(argv[i], "SMALLINT")
- && strcasecmp(argv[i], "BIGINT")
- && strcasecmp(argv[i], "DOUBLE")
- && strcasecmp(argv[i], "BINARY")
- && strcasecmp(argv[i], "TIMESTAMP")
- && strcasecmp(argv[i], "NCHAR")) {
+ if (strcasecmp(dataType, "INT")
+ && strcasecmp(dataType, "FLOAT")
+ && strcasecmp(dataType, "TINYINT")
+ && strcasecmp(dataType, "BOOL")
+ && strcasecmp(dataType, "SMALLINT")
+ && strcasecmp(dataType, "BIGINT")
+ && strcasecmp(dataType, "DOUBLE")
+ && strcasecmp(dataType, "BINARY")
+ && strcasecmp(dataType, "TIMESTAMP")
+ && strcasecmp(dataType, "NCHAR")) {
printHelp();
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
- arguments->datatype[0] = argv[i];
- arguments->datatype[1] = NULL;
+ arguments->dataType[0] = dataType;
+ if (0 == strcasecmp(dataType, "INT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(dataType, "TINYINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strcasecmp(dataType, "SMALLINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(dataType, "BIGINT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(dataType, "FLOAT")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(dataType, "DOUBLE")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strcasecmp(dataType, "BINARY")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strcasecmp(dataType, "NCHAR")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(dataType, "BOOL")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(dataType, "TIMESTAMP")) {
+ arguments->data_type[0] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ arguments->data_type[0] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[1] = NULL;
+ arguments->data_type[1] = TSDB_DATA_TYPE_NULL;
} else {
// more than one col
int index = 0;
- g_dupstr = strdup(argv[i]);
+ g_dupstr = strdup(dataType);
char *running = g_dupstr;
char *token = strsep(&running, ",");
while(token != NULL) {
@@ -1035,106 +1628,309 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
- arguments->datatype[index++] = token;
+
+ if (0 == strcasecmp(token, "INT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_INT;
+ } else if (0 == strcasecmp(token, "FLOAT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strcasecmp(token, "SMALLINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strcasecmp(token, "BIGINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strcasecmp(token, "DOUBLE")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strcasecmp(token, "TINYINT")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strcasecmp(token, "BINARY")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strcasecmp(token, "NCHAR")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strcasecmp(token, "BOOL")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strcasecmp(token, "TIMESTAMP")) {
+ arguments->data_type[index] = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ arguments->dataType[index] = token;
+ index ++;
token = strsep(&running, ",");
if (index >= MAX_NUM_COLUMNS) break;
}
- arguments->datatype[index] = NULL;
- }
- } else if (strcmp(argv[i], "-w") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-w need a number following!\n");
+ arguments->dataType[index] = NULL;
+ arguments->data_type[index] = TSDB_DATA_TYPE_NULL;
+ }
+ } else if ((0 == strncmp(argv[i], "-w", strlen("-w")))
+ || (0 == strncmp(argv[i], "--binwidth", strlen("--binwidth")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "w");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "w");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--binwidth=", strlen("--binwidth="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--binwidth=")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("--binwidth=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-w", strlen("-w"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-w")))) {
+ arguments->binwidth = atoi((char *)(argv[i]+strlen("-w")));
+ } else {
+ errorPrintReqArg2(argv[0], "-w");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--binwidth") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--binwidth");
+ exit(EXIT_FAILURE);
+ }
+ arguments->binwidth = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->len_of_binary = atoi(argv[++i]);
- } else if (strcmp(argv[i], "-m") == 0) {
- if ((argc == i+1) ||
- (isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
+ } else if ((0 == strncmp(argv[i], "-m", strlen("-m")))
+ || (0 == strncmp(argv[i], "--table-prefix", strlen("--table-prefix")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "m");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else if (0 == strncmp(argv[i], "--table-prefix=", strlen("--table-prefix="))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("--table-prefix="));
+ } else if (0 == strncmp(argv[i], "-m", strlen("-m"))) {
+ arguments->tb_prefix = (char *)(argv[i] + strlen("-m"));
+ } else if (strlen("--table-prefix") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--table-prefix");
+ exit(EXIT_FAILURE);
+ }
+ arguments->tb_prefix = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->tb_prefix = argv[++i];
- } else if (strcmp(argv[i], "-N") == 0) {
+ } else if ((strcmp(argv[i], "-N") == 0)
+ || (0 == strcmp(argv[i], "--normal-table"))) {
+ arguments->demo_mode = false;
arguments->use_metric = false;
- } else if (strcmp(argv[i], "-M") == 0) {
+ } else if ((strcmp(argv[i], "-M") == 0)
+ || (0 == strcmp(argv[i], "--random"))) {
arguments->demo_mode = false;
- } else if (strcmp(argv[i], "-x") == 0) {
- arguments->insert_only = false;
- } else if (strcmp(argv[i], "-y") == 0) {
+ } else if ((strcmp(argv[i], "-x") == 0)
+ || (0 == strcmp(argv[i], "--aggr-func"))) {
+ arguments->aggr_func = true;
+ } else if ((strcmp(argv[i], "-y") == 0)
+ || (0 == strcmp(argv[i], "--answer-yes"))) {
arguments->answer_yes = true;
- } else if (strcmp(argv[i], "-g") == 0) {
+ } else if ((strcmp(argv[i], "-g") == 0)
+ || (0 == strcmp(argv[i], "--debug"))) {
arguments->debug_print = true;
} else if (strcmp(argv[i], "-gg") == 0) {
arguments->verbose_print = true;
- } else if (strcmp(argv[i], "-PP") == 0) {
- arguments->performance_print = true;
- } else if (strcmp(argv[i], "-O") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-O need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-R", strlen("-R")))
+ || (0 == strncmp(argv[i], "--disorder-range",
+ strlen("--disorder-range")))) {
+ if (strlen("-R") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "R");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "R");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder-range=",
+ strlen("--disorder-range="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("--disorder-range=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-R")))) {
+ arguments->disorderRange =
+ atoi((char *)(argv[i]+strlen("-R")));
+ } else {
+ errorPrintReqArg2(argv[0], "-R");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--disorder-range") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder-range");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRange = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ if (arguments->disorderRange < 0) {
+ errorPrint("Invalid disorder range %d, will be set to %d\n",
+ arguments->disorderRange, 1000);
+ arguments->disorderRange = 1000;
+ }
+ } else if ((0 == strncmp(argv[i], "-O", strlen("-O")))
+ || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "O");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "O");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--disorder=", strlen("--disorder="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--disorder=")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("--disorder=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-O", strlen("-O"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-O")))) {
+ arguments->disorderRatio = atoi((char *)(argv[i]+strlen("-O")));
+ } else {
+ errorPrintReqArg2(argv[0], "-O");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--disorder") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--disorder");
+ exit(EXIT_FAILURE);
+ }
+ arguments->disorderRatio = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
-
- arguments->disorderRatio = atoi(argv[++i]);
if (arguments->disorderRatio > 50) {
+ errorPrint("Invalid disorder ratio %d, will be set to %d\n",
+ arguments->disorderRatio, 50);
arguments->disorderRatio = 50;
}
if (arguments->disorderRatio < 0) {
+ errorPrint("Invalid disorder ratio %d, will be set to %d\n",
+ arguments->disorderRatio, 0);
arguments->disorderRatio = 0;
}
-
- } else if (strcmp(argv[i], "-R") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-R need a number following!\n");
+ } else if ((0 == strncmp(argv[i], "-a", strlen("-a")))
+ || (0 == strncmp(argv[i], "--replica",
+ strlen("--replica")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "a");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "a");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else if (0 == strncmp(argv[i], "--replica=",
+ strlen("--replica="))) {
+ if (isStringNumber((char *)(argv[i] + strlen("--replica=")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("--replica=")));
+ } else {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ } else if (0 == strncmp(argv[i], "-a", strlen("-a"))) {
+ if (isStringNumber((char *)(argv[i] + strlen("-a")))) {
+ arguments->replica =
+ atoi((char *)(argv[i]+strlen("-a")));
+ } else {
+ errorPrintReqArg2(argv[0], "-a");
+ exit(EXIT_FAILURE);
+ }
+ } else if (strlen("--replica") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ } else if (!isStringNumber(argv[i+1])) {
+ errorPrintReqArg2(argv[0], "--replica");
+ exit(EXIT_FAILURE);
+ }
+ arguments->replica = atoi(argv[++i]);
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
exit(EXIT_FAILURE);
}
- arguments->disorderRange = atoi(argv[++i]);
- if (arguments->disorderRange < 0)
- arguments->disorderRange = 1000;
-
- } else if (strcmp(argv[i], "-a") == 0) {
- if ((argc == i+1) ||
- (!isStringNumber(argv[i+1]))) {
- printHelp();
- errorPrint("%s", "\n\t-a need a number following!\n");
- exit(EXIT_FAILURE);
- }
- arguments->replica = atoi(argv[++i]);
if (arguments->replica > 3 || arguments->replica < 1) {
+ errorPrint("Invalid replica value %d, will be set to %d\n",
+ arguments->replica, 1);
arguments->replica = 1;
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
if (arguments->method_of_delete > 3) {
- errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n");
+ errorPrint("%s", "\n\t-D need a value (0~3) number following!\n");
exit(EXIT_FAILURE);
}
- } else if ((strcmp(argv[i], "--version") == 0) ||
- (strcmp(argv[i], "-V") == 0)){
+ } else if ((strcmp(argv[i], "--version") == 0)
+ || (strcmp(argv[i], "-V") == 0)) {
printVersion();
exit(0);
- } else if (strcmp(argv[i], "--help") == 0) {
+ } else if ((strcmp(argv[i], "--help") == 0)
+ || (strcmp(argv[i], "-?") == 0)) {
printHelp();
exit(0);
+ } else if (strcmp(argv[i], "--usage") == 0) {
+ printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\
+ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\
+ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\
+ [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\
+ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\
+ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-g]\n\
+ [--help] [--usage] [--version]\n");
+ exit(0);
} else {
- printHelp();
- errorPrint("%s", "ERROR: wrong options\n");
+ // to simulate argp_option output
+ if (strlen(argv[i]) > 2) {
+ if (0 == strncmp(argv[i], "--", 2)) {
+ fprintf(stderr, "%s: unrecognized options '%s'\n", argv[0], argv[i]);
+ } else if (0 == strncmp(argv[i], "-", 1)) {
+ char tmp[2] = {0};
+ tstrncpy(tmp, argv[i]+1, 2);
+ fprintf(stderr, "%s: invalid options -- '%s'\n", argv[0], tmp);
+ } else {
+ fprintf(stderr, "%s: Too many arguments\n", argv[0]);
+ }
+ } else {
+ fprintf(stderr, "%s invalid options -- '%s'\n", argv[0],
+ (char *)((char *)argv[i])+1);
+ }
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
exit(EXIT_FAILURE);
}
}
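+ // The dataType list is NULL-terminated; count how many columns were
+ // actually configured.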
int columnCount;
for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) {
- if (g_args.datatype[columnCount] == NULL) {
+ if (g_args.dataType[columnCount] == NULL) {
break;
}
}
@@ -1142,9 +1938,58 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
if (0 == columnCount) {
ERROR_EXIT("data type error!");
}
- g_args.num_of_CPR = columnCount;
+ g_args.columnCount = columnCount;
+
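+ // Estimate the textual length of one row: 20 bytes for the timestamp
+ // plus each column's buffer length; BINARY/NCHAR add binwidth + 3
+ // (room for quotes and a separator).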
+ g_args.lenOfOneRow = 20; // timestamp
+ for (int c = 0; c < g_args.columnCount; c++) {
+ switch(g_args.data_type[c]) {
+ case TSDB_DATA_TYPE_BINARY:
+ g_args.lenOfOneRow += g_args.binwidth + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ g_args.lenOfOneRow += g_args.binwidth + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ g_args.lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ g_args.lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ g_args.lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ g_args.lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ g_args.lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ g_args.lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ g_args.lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ g_args.lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
- if (((arguments->debug_print) && (arguments->metaFile == NULL))
+ if (((arguments->debug_print) && (NULL != arguments->metaFile))
|| arguments->verbose_print) {
printf("###################################################################\n");
printf("# meta file: %s\n", arguments->metaFile);
@@ -1155,11 +2000,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf("# Password: %s\n", arguments->password);
printf("# Use metric: %s\n",
arguments->use_metric ? "true" : "false");
- if (*(arguments->datatype)) {
+ if (*(arguments->dataType)) {
printf("# Specified data type: ");
- for (int i = 0; i < MAX_NUM_COLUMNS; i++)
- if (arguments->datatype[i])
- printf("%s,", arguments->datatype[i]);
+ for (int c = 0; c < MAX_NUM_COLUMNS; c++)
+ if (arguments->dataType[c])
+ printf("%s,", arguments->dataType[c]);
else
break;
printf("\n");
@@ -1167,15 +2012,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
printf("# Number of records per req: %u\n",
- arguments->num_of_RPR);
+ arguments->reqPerReq);
printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
- printf("# Length of Binary: %d\n", arguments->len_of_binary);
- printf("# Number of Threads: %d\n", arguments->num_of_threads);
+ printf("# Length of Binary: %d\n", arguments->binwidth);
+ printf("# Number of Threads: %d\n", arguments->nthreads);
printf("# Number of Tables: %"PRId64"\n",
- arguments->num_of_tables);
+ arguments->ntables);
printf("# Number of Data per Table: %"PRId64"\n",
- arguments->num_of_DPT);
+ arguments->insertRows);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
if (arguments->disorderRatio) {
@@ -1201,31 +2046,20 @@ static void tmfclose(FILE *fp) {
static void tmfree(char *buf) {
if (NULL != buf) {
free(buf);
+ buf = NULL;
}
}
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
- int i;
- TAOS_RES *res = NULL;
- int32_t code = -1;
- for (i = 0; i < 5 /* retry */; i++) {
- if (NULL != res) {
- taos_free_result(res);
- res = NULL;
- }
+ verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
- res = taos_query(taos, command);
- code = taos_errno(res);
- if (0 == code) {
- break;
- }
- }
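+ // Execute the command once; the former 5-attempt retry loop is dropped
+ // so failures surface immediately.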
+ TAOS_RES *res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
- verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
if (code != 0) {
if (!quiet) {
- errorPrint("Failed to execute %s, reason: %s\n",
+ errorPrint2("Failed to execute <%s>, reason: %s\n",
command, taos_errstr(res));
}
taos_free_result(res);
@@ -1247,7 +2081,7 @@ static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{
pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
if (pThreadInfo->fp == NULL) {
- errorPrint(
+ errorPrint2(
"%s() LN%d, failed to open result file: %s, result will not save to file\n",
__func__, __LINE__, pThreadInfo->filePath);
return;
@@ -1266,7 +2100,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
- errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
+ errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
return ;
}
@@ -1306,7 +2140,7 @@ static void selectAndGetResult(
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
TAOS_RES *res = taos_query(pThreadInfo->taos, command);
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return;
@@ -1325,23 +2159,23 @@ static void selectAndGetResult(
}
} else {
- errorPrint("%s() LN%d, unknown query mode: %s\n",
+ errorPrint2("%s() LN%d, unknown query mode: %s\n",
__func__, __LINE__, g_queryInfo.queryMode);
}
}
-static char *rand_bool_str(){
+static char *rand_bool_str() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randbool_buff + (cursor * BOOL_BUFF_LEN);
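+ // The modulo below is a defensive guard; the wrap-around above already
+ // keeps cursor within [0, MAX_PREPARED_RAND).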
+ return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN);
}
-static int32_t rand_bool(){
+static int32_t rand_bool() {
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 2;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 2;
}
static char *rand_tinyint_str()
@@ -1349,15 +2183,16 @@ static char *rand_tinyint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randtinyint_buff + (cursor * TINYINT_BUFF_LEN);
+ return g_randtinyint_buff +
+ ((cursor % MAX_PREPARED_RAND) * TINYINT_BUFF_LEN);
}
static int32_t rand_tinyint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 128;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 128;
}
static char *rand_smallint_str()
@@ -1365,15 +2200,16 @@ static char *rand_smallint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randsmallint_buff + (cursor * SMALLINT_BUFF_LEN);
+ return g_randsmallint_buff +
+ ((cursor % MAX_PREPARED_RAND) * SMALLINT_BUFF_LEN);
}
static int32_t rand_smallint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor] % 32767;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND] % 32767;
}
static char *rand_int_str()
@@ -1381,15 +2217,15 @@ static char *rand_int_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randint_buff + (cursor * INT_BUFF_LEN);
+ return g_randint_buff + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN);
}
static int32_t rand_int()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randint[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randint[cursor % MAX_PREPARED_RAND];
}
static char *rand_bigint_str()
@@ -1397,15 +2233,16 @@ static char *rand_bigint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randbigint_buff + (cursor * BIGINT_BUFF_LEN);
+ return g_randbigint_buff +
+ ((cursor % MAX_PREPARED_RAND) * BIGINT_BUFF_LEN);
}
static int64_t rand_bigint()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randbigint[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randbigint[cursor % MAX_PREPARED_RAND];
}
static char *rand_float_str()
@@ -1413,7 +2250,7 @@ static char *rand_float_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_randfloat_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
@@ -1421,8 +2258,8 @@ static float rand_float()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return g_randfloat[cursor];
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return g_randfloat[cursor % MAX_PREPARED_RAND];
}
static char *demo_current_float_str()
@@ -1430,15 +2267,17 @@ static char *demo_current_float_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_current_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_rand_current_buff +
+ ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
static float UNUSED_FUNC demo_current_float()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000);
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10)
+ + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000);
}
static char *demo_voltage_int_str()
@@ -1446,29 +2285,31 @@ static char *demo_voltage_int_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_voltage_buff + (cursor * INT_BUFF_LEN);
+ return g_rand_voltage_buff +
+ ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN);
}
static int32_t UNUSED_FUNC demo_voltage_int()
{
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return 215 + g_randint[cursor] % 10;
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10;
}
static char *demo_phase_float_str() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_phase_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
-static float UNUSED_FUNC demo_phase_float(){
+static float UNUSED_FUNC demo_phase_float() {
static int cursor;
cursor++;
- cursor = cursor % MAX_PREPARED_RAND;
- return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360);
+ if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
+ return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10
+ + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360);
}
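+ /* The demo_*() generators model an electricity meter: current of roughly
+  * 9.8 to 10.2 A, voltage of 215 to 224 V, and a phase expressed as a
+  * fraction of a 360-degree cycle (about 0.32 to 0.34). */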
#if 0
@@ -1542,7 +2383,7 @@ static void init_rand_data() {
g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND);
assert(g_randdouble_buff);
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
+ for (int i = 0; i < MAX_PREPARED_RAND; i++) {
g_randint[i] = (int)(taosRandom() % 65535);
sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d",
g_randint[i]);
@@ -1599,10 +2440,11 @@ static void init_rand_data() {
static int printfInsertMeta() {
SHOW_PARSE_RESULT_START();
- if (g_args.demo_mode)
- printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n");
- else
+ if (g_args.demo_mode) {
+ printf("\ntaosdemo is simulating data generated by power equipment monitoring...\n\n");
+ } else {
printf("\ntaosdemo is simulating random data as you request..\n\n");
+ }
if (g_args.iface != INTERFACE_BUT) {
// first time if no iface specified
@@ -1619,11 +2461,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n",
- g_Dbs.threadCountByCreateTbl);
+ g_Dbs.threadCountForCreateTbl);
printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
printf("number of records per req: \033[33m%u\033[0m\n",
- g_args.num_of_RPR);
+ g_args.reqPerReq);
printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
@@ -1634,9 +2476,9 @@ static int printfInsertMeta() {
printf(" database[%d] name: \033[33m%s\033[0m\n",
i, g_Dbs.db[i].dbName);
if (0 == g_Dbs.db[i].drop) {
- printf(" drop: \033[33mno\033[0m\n");
+ printf(" drop: \033[33m no\033[0m\n");
} else {
- printf(" drop: \033[33myes\033[0m\n");
+ printf(" drop: \033[33m yes\033[0m\n");
}
if (g_Dbs.db[i].dbCfg.blocks > 0) {
@@ -1688,9 +2530,7 @@ static int printfInsertMeta() {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
-#if NANO_SECOND_ENABLED == 1
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))
-#endif
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) {
printf(" precision: \033[33m%s\033[0m\n",
g_Dbs.db[i].dbCfg.precision);
@@ -1707,7 +2547,7 @@ static int printfInsertMeta() {
printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
printf(" autoCreateTable: \033[33m%s\033[0m\n", "no");
@@ -1747,9 +2587,9 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
- printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n");
+ printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n");
}else {
- printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
+ printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n");
}
*/
printf(" interlaceRows: \033[33m%u\033[0m\n",
@@ -1829,8 +2669,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "configDir: %s\n", configDir);
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
- fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR);
+ fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountForCreateTbl);
+ fprintf(fp, "number of records per req: %u\n", g_args.reqPerReq);
fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
@@ -1881,9 +2721,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
-#if NANO_SECOND_ENABLED == 1
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))
-#endif
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
fprintf(fp, " precision: %s\n",
g_Dbs.db[i].dbCfg.precision);
@@ -1899,7 +2737,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " super table[%d]:\n", j);
fprintf(fp, " stbName: %s\n",
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
fprintf(fp, " autoCreateTable: %s\n", "no");
@@ -2058,7 +2896,7 @@ static void printfQueryMeta() {
printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
- g_queryInfo.superQueryInfo.sTblName);
+ g_queryInfo.superQueryInfo.stbName);
printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
@@ -2086,10 +2924,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
-#if NANO_SECOND_ENABLED == 1
- } if (precision == TSDB_TIME_PRECISION_NANO) {
+ } else if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
-#endif
} else {
tt = (time_t)(val / 1000);
}
@@ -2111,10 +2947,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
-#if NANO_SECOND_ENABLED == 1
} else if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
-#endif
} else {
sprintf(buf + pos, ".%03d", (int)(val % 1000));
}
@@ -2133,36 +2967,45 @@ static void xDumpFieldToFile(FILE* fp, const char* val,
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
- fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
+ fprintf(fp, "%d", ((((int32_t)(*((int8_t*)val))) == 1) ? 1 : 0));
break;
+
case TSDB_DATA_TYPE_TINYINT:
fprintf(fp, "%d", *((int8_t *)val));
break;
+
case TSDB_DATA_TYPE_SMALLINT:
fprintf(fp, "%d", *((int16_t *)val));
break;
+
case TSDB_DATA_TYPE_INT:
fprintf(fp, "%d", *((int32_t *)val));
break;
+
case TSDB_DATA_TYPE_BIGINT:
- fprintf(fp, "%" PRId64, *((int64_t *)val));
+ fprintf(fp, "%"PRId64"", *((int64_t *)val));
break;
+
case TSDB_DATA_TYPE_FLOAT:
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
break;
+
case TSDB_DATA_TYPE_DOUBLE:
fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
break;
+
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
memcpy(buf, val, length);
buf[length] = 0;
fprintf(fp, "\'%s\'", buf);
break;
+
case TSDB_DATA_TYPE_TIMESTAMP:
formatTimestamp(buf, *(int64_t*)val, precision);
fprintf(fp, "'%s'", buf);
break;
+
default:
break;
}
@@ -2176,7 +3019,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
FILE* fp = fopen(fname, "at");
if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file: %s\n",
+ errorPrint2("%s() LN%d, failed to open file: %s\n",
__func__, __LINE__, fname);
return -1;
}
@@ -2223,7 +3066,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
int32_t code = taos_errno(res);
if (code != 0) {
- errorPrint( "failed to run , reason: %s\n",
+ errorPrint2("failed to run , reason: %s\n",
taos_errstr(res));
return -1;
}
@@ -2239,7 +3082,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
if (dbInfos[count] == NULL) {
- errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count);
+ errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count);
return -1;
}
@@ -2392,7 +3235,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
request_buf = malloc(req_buf_len);
if (NULL == request_buf) {
- errorPrint("%s", "ERROR, cannot allocate memory.\n");
+ errorPrint("%s", "cannot allocate memory.\n");
exit(EXIT_FAILURE);
}
@@ -2531,7 +3374,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
if (NULL == dataBuf) {
- errorPrint("%s() LN%d, calloc failed! size:%d\n",
+ errorPrint2("%s() LN%d, calloc failed! size:%d\n",
__func__, __LINE__, TSDB_MAX_SQL_LEN+1);
return NULL;
}
@@ -2596,7 +3439,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
if ((g_args.demo_mode) && (i == 0)) {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
- "%"PRId64",", tableSeq % 10);
+ "%"PRId64",", (tableSeq % 10) + 1);
} else {
dataLen += snprintf(dataBuf + dataLen,
TSDB_MAX_SQL_LEN - dataLen,
@@ -2631,7 +3474,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
"%"PRId64",", rand_bigint());
} else {
- errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType);
+ errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType);
tmfree(dataBuf);
return NULL;
}
@@ -2649,29 +3492,50 @@ static int calcRowLen(SSuperTable* superTbls) {
for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) {
char* dataType = superTbls->columns[colIndex].dataType;
- if (strcasecmp(dataType, "BINARY") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- lenOfOneRow += INT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- lenOfOneRow += BIGINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- lenOfOneRow += SMALLINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- lenOfOneRow += TINYINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- lenOfOneRow += BOOL_BUFF_LEN;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- lenOfOneRow += FLOAT_BUFF_LEN;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- lenOfOneRow += DOUBLE_BUFF_LEN;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- lenOfOneRow += TIMESTAMP_BUFF_LEN;
- } else {
- errorPrint("get error data type : %s\n", dataType);
- exit(EXIT_FAILURE);
+ switch(superTbls->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ lenOfOneRow += superTbls->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ errorPrint2("get error data type : %s\n", dataType);
+ exit(EXIT_FAILURE);
}
}
@@ -2701,7 +3565,7 @@ static int calcRowLen(SSuperTable* superTbls) {
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
} else {
- errorPrint("get error tag type : %s\n", dataType);
+ errorPrint2("get error tag type : %s\n", dataType);
exit(EXIT_FAILURE);
}
}
@@ -2711,9 +3575,8 @@ static int calcRowLen(SSuperTable* superTbls) {
return 0;
}
-
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
- char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
+ char* dbName, char* stbName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[1024] = "\0";
@@ -2724,21 +3587,19 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
- if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
- limit, offset);
- }
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
+ limit, offset);
- //get all child table name use cmd: select tbname from superTblName;
+ //get all child table names with: select tbname from superTblName;
snprintf(command, 1024, "select tbname from %s.%s %s",
- dbName, sTblName, limitBuf);
+ dbName, stbName, limitBuf);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
if (code != 0) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to run command %s\n",
+ errorPrint2("%s() LN%d, failed to run command %s\n",
__func__, __LINE__, command);
exit(EXIT_FAILURE);
}
@@ -2750,7 +3611,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
if (NULL == childTblName) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
}
@@ -2760,7 +3621,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
int32_t* len = taos_fetch_lengths(res);
if (0 == strlen((char *)row[0])) {
- errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n",
__func__, __LINE__, count);
exit(EXIT_FAILURE);
}
@@ -2781,8 +3642,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
tmfree(childTblName);
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
- __func__, __LINE__, dbName, sTblName);
+ errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+ __func__, __LINE__, dbName, stbName);
exit(EXIT_FAILURE);
}
}
@@ -2797,10 +3658,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
}
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
- char* sTblName, char** childTblNameOfSuperTbl,
+ char* stbName, char** childTblNameOfSuperTbl,
int64_t* childTblCountOfSuperTbl) {
- return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
+ return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, stbName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
-1, 0);
}
@@ -2814,7 +3675,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
int count = 0;
- //get schema use cmd: describe superTblName;
+ //get the schema with: describe superTblName;
- snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName);
+ snprintf(command, 1024, "describe %s.%s", dbName, superTbls->stbName);
res = taos_query(taos, command);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -2840,6 +3701,39 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(DATATYPE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->tags[tagIndex].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->tags[tagIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
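+ /* The same name-to-enum mapping recurs for columns below and twice more
+  * in the JSON parser; a shared helper would keep all four copies in sync.
+  * A minimal sketch (the helper name is hypothetical and the return type
+  * should match the data_type field):
+  *
+  *     static uint8_t dataTypeFromName(const char *name) {
+  *         if (0 == strncasecmp(name, "INT", strlen("INT")))
+  *             return TSDB_DATA_TYPE_INT;
+  *         ...check the remaining TSDB_DATA_TYPE_* names the same way...
+  *         return TSDB_DATA_TYPE_NULL;
+  *     }
+  */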
superTbls->tags[tagIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->tags[tagIndex].note,
@@ -2851,16 +3745,51 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
tstrncpy(superTbls->columns[columnIndex].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
+
tstrncpy(superTbls->columns[columnIndex].dataType,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(DATATYPE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1);
+ if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->columns[columnIndex].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->columns[columnIndex].data_type = TSDB_DATA_TYPE_NULL;
+ }
superTbls->columns[columnIndex].dataLen =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(superTbls->columns[columnIndex].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(NOTE_BUFF_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1);
+
columnIndex++;
}
count++;
@@ -2878,11 +3807,11 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
int childTblCount = 10000;
superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (superTbls->childTblName == NULL) {
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
return -1;
}
getAllChildNameOfSuperTable(taos, dbName,
- superTbls->sTblName,
+ superTbls->stbName,
&superTbls->childTblName,
&superTbls->childTblCount);
}
@@ -2898,79 +3827,98 @@ static int createSuperTable(
assert(command);
char cols[COL_BUFFER_LEN] = "\0";
- int colIndex;
int len = 0;
int lenOfOneRow = 0;
if (superTbl->columnCount == 0) {
- errorPrint("%s() LN%d, super table column count is %d\n",
+ errorPrint2("%s() LN%d, super table column count is %d\n",
__func__, __LINE__, superTbl->columnCount);
free(command);
return -1;
}
- for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
- char* dataType = superTbl->columns[colIndex].dataType;
+ for (int colIndex = 0; colIndex < superTbl->columnCount; colIndex++) {
- if (strcasecmp(dataType, "BINARY") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ",C%d %s(%d)", colIndex, "BINARY",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "NCHAR") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ",C%d %s(%d)", colIndex, "NCHAR",
- superTbl->columns[colIndex].dataLen);
- lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
- } else if (strcasecmp(dataType, "INT") == 0) {
- if ((g_args.demo_mode) && (colIndex == 1)) {
+ switch(superTbl->columns[colIndex].data_type) {
+ case TSDB_DATA_TYPE_BINARY:
len += snprintf(cols + len, COL_BUFFER_LEN - len,
- ", VOLTAGE INT");
- } else {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
- }
- lenOfOneRow += INT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BIGINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "BIGINT");
- lenOfOneRow += BIGINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "SMALLINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "SMALLINT");
- lenOfOneRow += SMALLINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "TINYINT") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
- lenOfOneRow += TINYINT_BUFF_LEN;
- } else if (strcasecmp(dataType, "BOOL") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
- lenOfOneRow += BOOL_BUFF_LEN;
- } else if (strcasecmp(dataType, "FLOAT") == 0) {
- if (g_args.demo_mode) {
- if (colIndex == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
- } else if (colIndex == 2) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
+ ",C%d %s(%d)", colIndex, "BINARY",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ",C%d %s(%d)", colIndex, "NCHAR",
+ superTbl->columns[colIndex].dataLen);
+ lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (colIndex == 1)) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len,
+ ", VOLTAGE INT");
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT");
}
- } else {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
- }
+ lenOfOneRow += INT_BUFF_LEN;
+ break;
- lenOfOneRow += FLOAT_BUFF_LEN;
- } else if (strcasecmp(dataType, "DOUBLE") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "DOUBLE");
- lenOfOneRow += DOUBLE_BUFF_LEN;
- } else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
- len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
- colIndex, "TIMESTAMP");
- lenOfOneRow += TIMESTAMP_BUFF_LEN;
- } else {
- taos_close(taos);
- free(command);
- errorPrint("%s() LN%d, config error data type : %s\n",
- __func__, __LINE__, dataType);
- exit(EXIT_FAILURE);
+ case TSDB_DATA_TYPE_BIGINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "BIGINT");
+ lenOfOneRow += BIGINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "SMALLINT");
+ lenOfOneRow += SMALLINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT");
+ lenOfOneRow += TINYINT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL");
+ lenOfOneRow += BOOL_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (colIndex == 0) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
+ } else if (colIndex == 2) {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
+ }
+ } else {
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT");
+ }
+
+ lenOfOneRow += FLOAT_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "DOUBLE");
+ lenOfOneRow += DOUBLE_BUFF_LEN;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s",
+ colIndex, "TIMESTAMP");
+ lenOfOneRow += TIMESTAMP_BUFF_LEN;
+ break;
+
+ default:
+ taos_close(taos);
+ free(command);
+ errorPrint2("%s() LN%d, config error data type : %s\n",
+ __func__, __LINE__, superTbl->columns[colIndex].dataType);
+ exit(EXIT_FAILURE);
}
}
@@ -2981,7 +3929,7 @@ static int createSuperTable(
if (NULL == superTbl->colsOfCreateChildTable) {
taos_close(taos);
free(command);
- errorPrint("%s() LN%d, Failed when calloc, size:%d",
+ errorPrint2("%s() LN%d, Failed when calloc, size:%d",
__func__, __LINE__, len+1);
exit(EXIT_FAILURE);
}
@@ -2991,7 +3939,7 @@ static int createSuperTable(
__func__, __LINE__, superTbl->colsOfCreateChildTable);
if (superTbl->tagCount == 0) {
- errorPrint("%s() LN%d, super table tag count is %d\n",
+ errorPrint2("%s() LN%d, super table tag count is %d\n",
__func__, __LINE__, superTbl->tagCount);
free(command);
return -1;
@@ -3058,7 +4006,7 @@ static int createSuperTable(
} else {
taos_close(taos);
free(command);
- errorPrint("%s() LN%d, config error tag type : %s\n",
+ errorPrint2("%s() LN%d, config error tag type : %s\n",
__func__, __LINE__, dataType);
exit(EXIT_FAILURE);
}
@@ -3070,16 +4018,16 @@ static int createSuperTable(
superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
snprintf(command, BUFFER_SIZE,
- "create table if not exists %s.%s (ts timestamp%s) tags %s",
- dbName, superTbl->sTblName, cols, tags);
+ "CREATE TABLE IF NOT EXISTS %s.%s (ts TIMESTAMP%s) TAGS %s",
+ dbName, superTbl->stbName, cols, tags);
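+ /* For the demo schema this produces a statement along the lines of:
+  *   CREATE TABLE IF NOT EXISTS <db>.<stb>
+  *     (ts TIMESTAMP, CURRENT FLOAT, VOLTAGE INT, PHASE FLOAT) TAGS (...)
+  * with the tag list built from the configured tag schema. */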
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
- errorPrint( "create supertable %s failed!\n\n",
- superTbl->sTblName);
+ errorPrint2("create supertable %s failed!\n\n",
+ superTbl->stbName);
free(command);
return -1;
}
- debugPrint("create supertable %s success!\n\n", superTbl->sTblName);
+ debugPrint("create supertable %s success!\n\n", superTbl->stbName);
free(command);
return 0;
}
@@ -3089,7 +4037,7 @@ int createDatabasesAndStables(char *command) {
int ret = 0;
taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
return -1;
}
@@ -3103,42 +4051,42 @@ int createDatabasesAndStables(char *command) {
int dataLen = 0;
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, "create database if not exists %s",
+ BUFFER_SIZE - dataLen, "CREATE DATABASE IF NOT EXISTS %s",
g_Dbs.db[i].dbName);
if (g_Dbs.db[i].dbCfg.blocks > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " blocks %d",
+ BUFFER_SIZE - dataLen, " BLOCKS %d",
g_Dbs.db[i].dbCfg.blocks);
}
if (g_Dbs.db[i].dbCfg.cache > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cache %d",
+ BUFFER_SIZE - dataLen, " CACHE %d",
g_Dbs.db[i].dbCfg.cache);
}
if (g_Dbs.db[i].dbCfg.days > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " days %d",
+ BUFFER_SIZE - dataLen, " DAYS %d",
g_Dbs.db[i].dbCfg.days);
}
if (g_Dbs.db[i].dbCfg.keep > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " keep %d",
+ BUFFER_SIZE - dataLen, " KEEP %d",
g_Dbs.db[i].dbCfg.keep);
}
if (g_Dbs.db[i].dbCfg.quorum > 1) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " quorum %d",
+ BUFFER_SIZE - dataLen, " QUORUM %d",
g_Dbs.db[i].dbCfg.quorum);
}
if (g_Dbs.db[i].dbCfg.replica > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " replica %d",
+ BUFFER_SIZE - dataLen, " REPLICA %d",
g_Dbs.db[i].dbCfg.replica);
}
if (g_Dbs.db[i].dbCfg.update > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " update %d",
+ BUFFER_SIZE - dataLen, " UPDATE %d",
g_Dbs.db[i].dbCfg.update);
}
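+ /* Each option is appended only when the config sets it; a minimal
+  * config yields just "CREATE DATABASE IF NOT EXISTS <db>". */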
//if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) {
@@ -3147,17 +4095,17 @@ int createDatabasesAndStables(char *command) {
//}
if (g_Dbs.db[i].dbCfg.minRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " minrows %d",
+ BUFFER_SIZE - dataLen, " MINROWS %d",
g_Dbs.db[i].dbCfg.minRows);
}
if (g_Dbs.db[i].dbCfg.maxRows > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " maxrows %d",
+ BUFFER_SIZE - dataLen, " MAXROWS %d",
g_Dbs.db[i].dbCfg.maxRows);
}
if (g_Dbs.db[i].dbCfg.comp > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " comp %d",
+ BUFFER_SIZE - dataLen, " COMP %d",
g_Dbs.db[i].dbCfg.comp);
}
if (g_Dbs.db[i].dbCfg.walLevel > 0) {
@@ -3167,18 +4115,16 @@ int createDatabasesAndStables(char *command) {
}
if (g_Dbs.db[i].dbCfg.cacheLast > 0) {
dataLen += snprintf(command + dataLen,
- BUFFER_SIZE - dataLen, " cachelast %d",
+ BUFFER_SIZE - dataLen, " CACHELAST %d",
g_Dbs.db[i].dbCfg.cacheLast);
}
if (g_Dbs.db[i].dbCfg.fsync > 0) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
- " fsync %d", g_Dbs.db[i].dbCfg.fsync);
+ " FSYNC %d", g_Dbs.db[i].dbCfg.fsync);
}
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
-#if NANO_SECOND_ENABLED == 1
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
"ns", 2))
-#endif
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision,
"us", 2))) {
dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen,
@@ -3187,7 +4133,7 @@ int createDatabasesAndStables(char *command) {
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos);
- errorPrint( "\ncreate database %s failed!\n\n",
+ errorPrint("\ncreate database %s failed!\n\n",
g_Dbs.db[i].dbName);
return -1;
}
@@ -3201,7 +4147,7 @@ int createDatabasesAndStables(char *command) {
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
- g_Dbs.db[i].superTbls[j].sTblName);
+ g_Dbs.db[i].superTbls[j].stbName);
ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
if ((ret != 0) || (g_Dbs.db[i].drop)) {
@@ -3217,8 +4163,8 @@ int createDatabasesAndStables(char *command) {
ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
&g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
- errorPrint("\nget super table %s.%s info failed!\n\n",
- g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
+ errorPrint2("\nget super table %s.%s info failed!\n\n",
+ g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].stbName);
continue;
}
@@ -3245,7 +4191,7 @@ static void* createTable(void *sarg)
pThreadInfo->buffer = calloc(buff_len, 1);
if (pThreadInfo->buffer == NULL) {
- errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -3260,14 +4206,15 @@ static void* createTable(void *sarg)
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(pThreadInfo->buffer, buff_len,
- "create table if not exists %s.%s%"PRIu64" %s;",
+ "CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
+ batchNum++;
} else {
if (stbInfo == NULL) {
free(pThreadInfo->buffer);
- errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
+ errorPrint2("%s() LN%d, use metric, but super table info is NULL\n",
__func__, __LINE__);
exit(EXIT_FAILURE);
} else {
@@ -3275,7 +4222,7 @@ static void* createTable(void *sarg)
batchNum = 0;
memset(pThreadInfo->buffer, 0, buff_len);
len += snprintf(pThreadInfo->buffer + len,
- buff_len - len, "create table ");
+ buff_len - len, "CREATE TABLE ");
}
char* tagsValBuf = NULL;
@@ -3300,7 +4247,7 @@ static void* createTable(void *sarg)
"if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
pThreadInfo->db_name, stbInfo->childTblPrefix,
i, pThreadInfo->db_name,
- stbInfo->sTblName, tagsValBuf);
+ stbInfo->stbName, tagsValBuf);
free(tagsValBuf);
batchNum++;
if ((batchNum < stbInfo->batchCreateTableNum)
@@ -3312,14 +4259,16 @@ static void* createTable(void *sarg)
}
len = 0;
+
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
- NO_INSERT_TYPE, false)){
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ NO_INSERT_TYPE, false)) {
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
free(pThreadInfo->buffer);
return NULL;
}
+ pThreadInfo->tables_created += batchNum;
+ batchNum = 0;
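+ /* tables_created is summed into g_actualChildTables after the threads join */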
- uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
@@ -3330,7 +4279,7 @@ static void* createTable(void *sarg)
if (0 != len) {
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
NO_INSERT_TYPE, false)) {
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
}
}
@@ -3375,7 +4324,7 @@ static int startMultiThreadCreateChildTable(
db_name,
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
- errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
__func__, __LINE__, taos_errstr(NULL));
free(pids);
free(infos);
@@ -3389,6 +4338,7 @@ static int startMultiThreadCreateChildTable(
pThreadInfo->use_metric = true;
pThreadInfo->cols = cols;
pThreadInfo->minDelay = UINT64_MAX;
+ pThreadInfo->tables_created = 0;
pthread_create(pids + i, NULL, createTable, pThreadInfo);
}
@@ -3399,6 +4349,8 @@ static int startMultiThreadCreateChildTable(
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
taos_close(pThreadInfo->taos);
+
+ g_actualChildTables += pThreadInfo->tables_created;
}
free(pids);
@@ -3425,14 +4377,13 @@ static void createChildTables() {
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
uint64_t startFrom = 0;
- g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
__func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable(
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
- g_Dbs.threadCountByCreateTbl,
+ g_Dbs.threadCountForCreateTbl,
startFrom,
g_Dbs.db[i].superTbls[j].childTblCount,
g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j]));
@@ -3441,15 +4392,15 @@ static void createChildTables() {
} else {
// normal table
len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP");
- for (int j = 0; j < g_args.num_of_CPR; j++) {
- if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
- || (strncasecmp(g_args.datatype[j],
+ for (int j = 0; j < g_args.columnCount; j++) {
+ if ((strncasecmp(g_args.dataType[j], "BINARY", strlen("BINARY")) == 0)
+ || (strncasecmp(g_args.dataType[j],
"NCHAR", strlen("NCHAR")) == 0)) {
snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
- ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
+ ",C%d %s(%d)", j, g_args.dataType[j], g_args.binwidth);
} else {
snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
- ",C%d %s", j, g_args.datatype[j]);
+ ",C%d %s", j, g_args.dataType[j]);
}
len = strlen(tblColsBuf);
}
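+ /* e.g. three INT columns produce the schema "(TS TIMESTAMP,C0 INT,C1 INT,C2 INT)" */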
@@ -3458,12 +4409,12 @@ static void createChildTables() {
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
__func__, __LINE__,
- g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
+ g_Dbs.db[i].dbName, g_args.ntables, tblColsBuf);
startMultiThreadCreateChildTable(
tblColsBuf,
- g_Dbs.threadCountByCreateTbl,
+ g_Dbs.threadCountForCreateTbl,
0,
- g_args.num_of_tables,
+ g_args.ntables,
g_Dbs.db[i].dbName,
NULL);
}
@@ -3541,7 +4492,7 @@ static int readTagFromCsvFileToMem(SSuperTable * stbInfo) {
/*
-Read 10000 lines at most. If more than 10000 lines, continue to read after using
+Read at most 10000 lines. If the file has more, reading resumes after these are consumed
*/
-static int readSampleFromCsvFileToMem(
+static int generateSampleFromCsvForStb(
SSuperTable* stbInfo) {
size_t n = 0;
ssize_t readLen = 0;
@@ -3550,19 +4501,19 @@ static int readSampleFromCsvFileToMem(
FILE* fp = fopen(stbInfo->sampleFile, "r");
if (fp == NULL) {
- errorPrint( "Failed to open sample file: %s, reason:%s\n",
+ errorPrint("Failed to open sample file: %s, reason:%s\n",
stbInfo->sampleFile, strerror(errno));
return -1;
}
assert(stbInfo->sampleDataBuf);
memset(stbInfo->sampleDataBuf, 0,
- MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow);
+ MAX_SAMPLES * stbInfo->lenOfOneRow);
while(1) {
readLen = tgetline(&line, &n, fp);
if (-1 == readLen) {
if(0 != fseek(fp, 0, SEEK_SET)) {
- errorPrint( "Failed to fseek file: %s, reason:%s\n",
+ errorPrint("Failed to fseek file: %s, reason:%s\n",
stbInfo->sampleFile, strerror(errno));
fclose(fp);
return -1;
@@ -3588,7 +4539,7 @@ static int readSampleFromCsvFileToMem(
line, readLen);
getRows++;
- if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) {
+ if (getRows == MAX_SAMPLES) {
break;
}
}
@@ -3605,7 +4556,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// columns
cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
if (columns && columns->type != cJSON_Array) {
- printf("ERROR: failed to read json, columns not found\n");
+ errorPrint("%s", "failed to read json, columns not found\n");
goto PARSE_OVER;
} else if (NULL == columns) {
superTbls->columnCount = 0;
@@ -3615,8 +4566,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
int columnSize = cJSON_GetArraySize(columns);
if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("failed to read json, column size overflow, max column size is %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
@@ -3634,8 +4585,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column count not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3646,8 +4596,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(column, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d: failed to read json, column type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column type not found\n");
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
@@ -3669,33 +4618,69 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
tstrncpy(superTbls->columns[index].dataType,
columnCase.dataType,
min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1));
+
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
}
if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_NUM_COLUMNS);
+ errorPrint("failed to read json, column size overflow, allowed max column size is %d\n",
+ MAX_NUM_COLUMNS);
goto PARSE_OVER;
}
superTbls->columnCount = index;
+ for (int c = 0; c < superTbls->columnCount; c++) {
+ if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->columns[c].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->columns[c].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
count = 1;
index = 0;
// tags
cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
if (!tags || tags->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, tags not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tags not found\n");
goto PARSE_OVER;
}
int tagSize = cJSON_GetArraySize(tags);
if (tagSize > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, max tag size is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
@@ -3709,7 +4694,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- printf("ERROR: failed to read json, column count not found\n");
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3720,8 +4705,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(tag, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, tag type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tag type not found\n");
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring,
@@ -3731,8 +4715,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (dataLen && dataLen->type == cJSON_Number) {
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column len not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column len not found\n");
goto PARSE_OVER;
} else {
columnCase.dataLen = 0;
@@ -3747,16 +4730,52 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
if (index > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
superTbls->tagCount = index;
+ for (int t = 0; t < superTbls->tagCount; t++) {
+ if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "INT", strlen("INT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_INT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TINYINT", strlen("TINYINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TINYINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "SMALLINT", strlen("SMALLINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_SMALLINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BIGINT", strlen("BIGINT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BIGINT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "FLOAT", strlen("FLOAT"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_FLOAT;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "DOUBLE", strlen("DOUBLE"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_DOUBLE;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BINARY", strlen("BINARY"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BINARY;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "NCHAR", strlen("NCHAR"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NCHAR;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "BOOL", strlen("BOOL"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_BOOL;
+ } else if (0 == strncasecmp(superTbls->tags[t].dataType,
+ "TIMESTAMP", strlen("TIMESTAMP"))) {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ } else {
+ superTbls->tags[t].data_type = TSDB_DATA_TYPE_NULL;
+ }
+ }
+
if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("columns + tags is more than allowed max columns count: %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
ret = true;
@@ -3779,7 +4798,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -3799,9 +4818,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
- tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
} else if (!password) {
- tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, "taosdata", SHELL_MAX_PASSWORD_LEN);
}
cJSON* resultfile = cJSON_GetObjectItem(root, "result_file");
@@ -3817,51 +4836,46 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!threads) {
g_Dbs.threadCount = 1;
} else {
- printf("ERROR: failed to read json, threads not found\n");
+ errorPrint("%s", "failed to read json, threads not found\n");
goto PARSE_OVER;
}
cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl");
if (threads2 && threads2->type == cJSON_Number) {
- g_Dbs.threadCountByCreateTbl = threads2->valueint;
+ g_Dbs.threadCountForCreateTbl = threads2->valueint;
} else if (!threads2) {
- g_Dbs.threadCountByCreateTbl = 1;
+ g_Dbs.threadCountForCreateTbl = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads2 not found\n");
goto PARSE_OVER;
}
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
if (gInsertInterval->valueint <0) {
- errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert interval input mistake\n");
goto PARSE_OVER;
}
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
goto PARSE_OVER;
}
- g_args.interlace_rows = interlaceRows->valueint;
+ g_args.interlaceRows = interlaceRows->valueint;
} else if (!interlaceRows) {
- g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+ g_args.interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode; must not exceed num_of_records_per_req
} else {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlaceRows input mistake\n");
goto PARSE_OVER;
}
@@ -3895,9 +4909,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
prompt();
numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
}
- g_args.num_of_RPR = numRecPerReq->valueint;
+ g_args.reqPerReq = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = MAX_RECORDS_PER_REQ;
+ g_args.reqPerReq = MAX_RECORDS_PER_REQ;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
@@ -3923,25 +4937,25 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
- // rows per table need be less than insert batch
+ // rows per table must not exceed the insert batch size
- if (g_args.interlace_rows > g_args.num_of_RPR) {
+ if (g_args.interlaceRows > g_args.reqPerReq) {
printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n",
- g_args.interlace_rows, g_args.num_of_RPR);
+ g_args.interlaceRows, g_args.reqPerReq);
printf(" interlace rows value will be set to num_of_records_per_req %u\n\n",
- g_args.num_of_RPR);
+ g_args.reqPerReq);
prompt();
- g_args.interlace_rows = g_args.num_of_RPR;
+ g_args.interlaceRows = g_args.reqPerReq;
}
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (!dbs || dbs->type != cJSON_Array) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
int dbSize = cJSON_GetArraySize(dbs);
if (dbSize > MAX_DB_COUNT) {
errorPrint(
- "ERROR: failed to read json, databases size overflow, max database is %d\n",
+ "failed to read json, databases size overflow, max database is %d\n",
MAX_DB_COUNT);
goto PARSE_OVER;
}
@@ -3954,13 +4968,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
// dbinfo
cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
if (!dbinfo || dbinfo->type != cJSON_Object) {
- printf("ERROR: failed to read json, dbinfo not found\n");
+ errorPrint("%s", "failed to read json, dbinfo not found\n");
goto PARSE_OVER;
}
cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
- printf("ERROR: failed to read json, db name not found\n");
+ errorPrint("%s", "failed to read json, db name not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
@@ -3975,8 +4989,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!drop) {
g_Dbs.db[i].drop = g_args.drop_database;
} else {
- errorPrint("%s() LN%d, failed to read json, drop input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, drop input mistake\n");
goto PARSE_OVER;
}
@@ -3988,7 +5001,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!precision) {
memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, precision not found\n");
+ errorPrint("%s", "failed to read json, precision not found\n");
goto PARSE_OVER;
}
@@ -3998,7 +5011,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!update) {
g_Dbs.db[i].dbCfg.update = -1;
} else {
- printf("ERROR: failed to read json, update not found\n");
+ errorPrint("%s", "failed to read json, update not found\n");
goto PARSE_OVER;
}
@@ -4008,7 +5021,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!replica) {
g_Dbs.db[i].dbCfg.replica = -1;
} else {
- printf("ERROR: failed to read json, replica not found\n");
+ errorPrint("%s", "failed to read json, replica not found\n");
goto PARSE_OVER;
}
@@ -4018,7 +5031,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!keep) {
g_Dbs.db[i].dbCfg.keep = -1;
} else {
- printf("ERROR: failed to read json, keep not found\n");
+ errorPrint("%s", "failed to read json, keep not found\n");
goto PARSE_OVER;
}
@@ -4028,7 +5041,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!days) {
g_Dbs.db[i].dbCfg.days = -1;
} else {
- printf("ERROR: failed to read json, days not found\n");
+ errorPrint("%s", "failed to read json, days not found\n");
goto PARSE_OVER;
}
@@ -4038,7 +5051,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cache) {
g_Dbs.db[i].dbCfg.cache = -1;
} else {
- printf("ERROR: failed to read json, cache not found\n");
+ errorPrint("%s", "failed to read json, cache not found\n");
goto PARSE_OVER;
}
@@ -4048,7 +5061,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!blocks) {
g_Dbs.db[i].dbCfg.blocks = -1;
} else {
- printf("ERROR: failed to read json, block not found\n");
+ errorPrint("%s", "failed to read json, block not found\n");
goto PARSE_OVER;
}
@@ -4068,7 +5081,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!minRows) {
g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, minRows not found\n");
+ errorPrint("%s", "failed to read json, minRows not found\n");
goto PARSE_OVER;
}
@@ -4078,7 +5091,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxRows) {
g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, maxRows not found\n");
+ errorPrint("%s", "failed to read json, maxRows not found\n");
goto PARSE_OVER;
}
@@ -4088,7 +5101,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!comp) {
g_Dbs.db[i].dbCfg.comp = -1;
} else {
- printf("ERROR: failed to read json, comp not found\n");
+ errorPrint("%s", "failed to read json, comp not found\n");
goto PARSE_OVER;
}
@@ -4098,7 +5111,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!walLevel) {
g_Dbs.db[i].dbCfg.walLevel = -1;
} else {
- printf("ERROR: failed to read json, walLevel not found\n");
+ errorPrint("%s", "failed to read json, walLevel not found\n");
goto PARSE_OVER;
}
@@ -4108,7 +5121,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cacheLast) {
g_Dbs.db[i].dbCfg.cacheLast = -1;
} else {
- printf("ERROR: failed to read json, cacheLast not found\n");
+ errorPrint("%s", "failed to read json, cacheLast not found\n");
goto PARSE_OVER;
}
@@ -4128,24 +5141,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!fsync) {
g_Dbs.db[i].dbCfg.fsync = -1;
} else {
- errorPrint("%s() LN%d, failed to read json, fsync input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, fsync input mistake\n");
goto PARSE_OVER;
}
- // super_talbes
+ // super_tables
cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
if (!stables || stables->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super_tables not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super_tables not found\n");
goto PARSE_OVER;
}
int stbSize = cJSON_GetArraySize(stables);
if (stbSize > MAX_SUPER_TABLE_COUNT) {
errorPrint(
- "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
- __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
+ "failed to read json, supertable size overflow, max supertable is %d\n",
+ MAX_SUPER_TABLE_COUNT);
goto PARSE_OVER;
}
@@ -4158,16 +5169,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
if (!stbName || stbName->type != cJSON_String
|| stbName->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, stb name not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stb name not found\n");
goto PARSE_OVER;
}
- tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
+ tstrncpy(g_Dbs.db[i].superTbls[j].stbName, stbName->valuestring,
TSDB_TABLE_NAME_LEN);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
- printf("ERROR: failed to read json, childtable_prefix not found\n");
+ errorPrint("%s", "failed to read json, childtable_prefix not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
@@ -4188,7 +5198,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!autoCreateTbl) {
g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
} else {
- printf("ERROR: failed to read json, auto_create_table not found\n");
+ errorPrint("%s", "failed to read json, auto_create_table not found\n");
goto PARSE_OVER;
}
@@ -4196,9 +5206,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
} else if (!batchCreateTbl) {
- g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000;
+ g_Dbs.db[i].superTbls[j].batchCreateTableNum = 10;
} else {
- printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
+ errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
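Note the behavioral change above: the fallback for batch_create_tbl_num drops from 1000 to 10, so configs that omit the key now create child tables in much smaller batches. A hypothetical minimal stanza (key names taken from this parser; values illustrative) that pins the old batch size explicitly:

/*
 * "super_tables": [{
 *     "name":                 "stb0",
 *     "childtable_prefix":    "t",
 *     "childtable_count":     100,
 *     "batch_create_tbl_num": 1000,
 *     "insert_rows":          1000
 * }]
 */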
@@ -4218,8 +5228,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!childTblExists) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
- errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, child_table_exists not found\n");
goto PARSE_OVER;
}
@@ -4229,11 +5239,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, childtable_count input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
+ g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
if (dataSource && dataSource->type == cJSON_String
@@ -4245,8 +5256,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
min(SMALL_BUFF_LEN, strlen("rand") + 1));
} else {
- errorPrint("%s() LN%d, failed to read json, data_source not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, data_source not found\n");
goto PARSE_OVER;
}
@@ -4257,13 +5267,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE;
} else if (0 == strcasecmp(stbIface->valuestring, "rest")) {
g_Dbs.db[i].superTbls[j].iface= REST_IFACE;
-#if STMT_IFACE_ENABLED == 1
} else if (0 == strcasecmp(stbIface->valuestring, "stmt")) {
g_Dbs.db[i].superTbls[j].iface= STMT_IFACE;
-#endif
} else {
- errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n",
- __func__, __LINE__, stbIface->valuestring);
+ errorPrint("failed to read json, insert_mode %s not recognized\n",
+ stbIface->valuestring);
goto PARSE_OVER;
}
} else if (!stbIface) {
@@ -4277,7 +5285,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_limit->type != cJSON_Number) {
- printf("ERROR: failed to read json, childtable_limit\n");
+ errorPrint("%s", "failed to read json, childtable_limit\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
@@ -4290,7 +5298,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if ((childTbl_offset->type != cJSON_Number)
|| (0 > childTbl_offset->valueint)) {
- printf("ERROR: failed to read json, childtable_offset\n");
+ errorPrint("%s", "failed to read json, childtable_offset\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint;
@@ -4306,7 +5314,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
"now", TSDB_DB_NAME_LEN);
} else {
- printf("ERROR: failed to read json, start_timestamp not found\n");
+ errorPrint("%s", "failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
}
@@ -4316,7 +5324,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!timestampStep) {
g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
} else {
- printf("ERROR: failed to read json, timestamp_step not found\n");
+ errorPrint("%s", "failed to read json, timestamp_step not found\n");
goto PARSE_OVER;
}
@@ -4331,7 +5339,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, sample_format not found\n");
+ errorPrint("%s", "failed to read json, sample_format not found\n");
goto PARSE_OVER;
}
@@ -4346,7 +5354,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, sample_file not found\n");
+ errorPrint("%s", "failed to read json, sample_file not found\n");
goto PARSE_OVER;
}
@@ -4364,7 +5372,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
g_Dbs.db[i].superTbls[j].tagSource = 0;
} else {
- printf("ERROR: failed to read json, tags_file not found\n");
+ errorPrint("%s", "failed to read json, tags_file not found\n");
goto PARSE_OVER;
}
@@ -4380,8 +5388,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxSqlLen) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
- errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n");
goto PARSE_OVER;
}
/*
@@ -4398,31 +5405,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!multiThreadWriteOneTbl) {
g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
} else {
- printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n");
+ errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n");
goto PARSE_OVER;
}
*/
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
if (insertRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
if (stbInterlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
@@ -4437,11 +5441,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows;
}
} else if (!stbInterlaceRows) {
- g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
+ g_Dbs.db[i].superTbls[j].interlaceRows = g_args.interlaceRows; // 0 means progressive mode, > 0 means interlace mode; max value must be less than or equal to num_of_records_per_req
} else {
errorPrint(
- "%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ "%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
@@ -4457,7 +5460,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRatio) {
g_Dbs.db[i].superTbls[j].disorderRatio = 0;
} else {
- printf("ERROR: failed to read json, disorderRatio not found\n");
+ errorPrint("%s", "failed to read json, disorderRatio not found\n");
goto PARSE_OVER;
}
@@ -4467,7 +5470,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRange) {
g_Dbs.db[i].superTbls[j].disorderRange = 1000;
} else {
- printf("ERROR: failed to read json, disorderRange not found\n");
+ errorPrint("%s", "failed to read json, disorderRange not found\n");
goto PARSE_OVER;
}
@@ -4475,17 +5478,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
if (insertInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
+ verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
@@ -4517,7 +5518,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -4537,9 +5538,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* password = cJSON_GetObjectItem(root, "password");
if (password && password->type == cJSON_String && password->valuestring != NULL) {
- tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE);
+ tstrncpy(g_queryInfo.password, password->valuestring, SHELL_MAX_PASSWORD_LEN);
} else if (!password) {
- tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);;
+ tstrncpy(g_queryInfo.password, "taosdata", SHELL_MAX_PASSWORD_LEN);;
}
cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no,
@@ -4555,23 +5556,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!answerPrompt) {
g_args.answer_yes = false;
} else {
- printf("ERROR: failed to read json, confirm_parameter_prompt not found\n");
+ errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n");
goto PARSE_OVER;
}
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
if (gQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s()", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
@@ -4579,7 +5578,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
@@ -4593,7 +5592,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.queryMode, "taosc",
min(SMALL_BUFF_LEN, strlen("taosc") + 1));
} else {
- printf("ERROR: failed to read json, query_mode not found\n");
+ errorPrint("%s", "failed to read json, query_mode not found\n");
goto PARSE_OVER;
}
@@ -4603,7 +5602,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, super_table_query not found\n");
+ errorPrint("%s", "failed to read json, super_table_query not found\n");
goto PARSE_OVER;
} else {
cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval");
@@ -4618,8 +5617,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
if (specifiedQueryTimes->valueint <= 0) {
errorPrint(
- "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, specifiedQueryTimes->valueint);
+ "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ specifiedQueryTimes->valueint);
goto PARSE_OVER;
}
@@ -4636,8 +5635,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (concurrent && concurrent->type == cJSON_Number) {
if (concurrent->valueint <= 0) {
errorPrint(
- "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n",
- __func__, __LINE__,
+ "query sqlCount %d or concurrent %d is not correct.\n",
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
@@ -4655,8 +5653,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4679,7 +5676,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", restart->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4695,7 +5692,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", keepProgress->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe keepProgress error\n");
+ errorPrint("%s", "failed to read json, subscribe keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4707,15 +5704,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!specifiedSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(specifiedSqls);
if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
> MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
- __func__, __LINE__,
+ errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
superSqlSize,
g_queryInfo.specifiedQueryInfo.concurrent,
MAX_QUERY_SQL_COUNT);
@@ -4729,7 +5724,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("ERROR: failed to read json, sql not found\n");
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
@@ -4769,7 +5764,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
memset(g_queryInfo.specifiedQueryInfo.result[j],
0, MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, super query result file not found\n");
+ errorPrint("%s",
+ "failed to read json, super query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4782,7 +5778,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, sub_table_query not found\n");
+ errorPrint("%s", "failed to read json, sub_table_query not found\n");
ret = true;
goto PARSE_OVER;
} else {
@@ -4796,24 +5792,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
if (superQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, superQueryTimes->valueint);
+ errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ superQueryTimes->valueint);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
if (threads->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads input mistake\n");
goto PARSE_OVER;
}
@@ -4832,11 +5826,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname");
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
- tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
+ tstrncpy(g_queryInfo.superQueryInfo.stbName, stblname->valuestring,
TSDB_TABLE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, super table name input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super table name input error\n");
goto PARSE_OVER;
}
@@ -4848,8 +5841,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", superAsyncMode->valuestring)) {
g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4859,8 +5851,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
if (superInterval && superInterval->type == cJSON_Number) {
if (superInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interval input mistake\n");
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
@@ -4878,7 +5869,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4894,7 +5885,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", superkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
+ errorPrint("%s",
+ "failed to read json, subscribe super table keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4931,14 +5923,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!superSqls) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
- __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+ errorPrint("failed to read json, query sql size overflow, max is %d\n",
+ MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4950,8 +5941,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String
|| sqlStr->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, sql not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
@@ -4959,14 +5949,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
- && result->valuestring != NULL){
+ && result->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.result[j],
result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, sub query result file not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sub query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4984,7 +5973,7 @@ static bool getInfoFromJsonFile(char* file) {
FILE *fp = fopen(file, "r");
if (!fp) {
- printf("failed to read %s, reason:%s\n", file, strerror(errno));
+ errorPrint("failed to read %s, reason:%s\n", file, strerror(errno));
return false;
}
@@ -4995,14 +5984,14 @@ static bool getInfoFromJsonFile(char* file) {
if (len <= 0) {
free(content);
fclose(fp);
- printf("failed to read %s, content is null", file);
+ errorPrint("failed to read %s, content is null", file);
return false;
}
content[len] = 0;
cJSON* root = cJSON_Parse(content);
if (root == NULL) {
- printf("ERROR: failed to cjson parse %s, invalid json format\n", file);
+ errorPrint("failed to cjson parse %s, invalid json format\n", file);
goto PARSE_OVER;
}
@@ -5015,13 +6004,13 @@ static bool getInfoFromJsonFile(char* file) {
} else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
g_args.test_mode = SUBSCRIBE_TEST;
} else {
- printf("ERROR: failed to read json, filetype not support\n");
+ errorPrint("%s", "failed to read json, filetype not support\n");
goto PARSE_OVER;
}
} else if (!filetype) {
g_args.test_mode = INSERT_TEST;
} else {
- printf("ERROR: failed to read json, filetype not found\n");
+ errorPrint("%s", "failed to read json, filetype not found\n");
goto PARSE_OVER;
}
@@ -5031,8 +6020,8 @@ static bool getInfoFromJsonFile(char* file) {
|| (SUBSCRIBE_TEST == g_args.test_mode)) {
ret = getMetaFromQueryJsonFile(root);
} else {
- errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "input json file type error! please input correct file type: insert or query or subscribe\n");
goto PARSE_OVER;
}
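The filetype dispatch above is the entry point for the whole config file; the accepted values and the paths they select (the insert/query mappings are read from the surrounding code, so treat the summary as inferred):

/*
 * "filetype": "insert"     -> INSERT_TEST,    getMetaFromInsertJsonFile()
 * "filetype": "query"      -> QUERY_TEST,     getMetaFromQueryJsonFile()
 * "filetype": "subscribe"  -> SUBSCRIBE_TEST, getMetaFromQueryJsonFile()
 * key omitted              -> INSERT_TEST
 */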
@@ -5059,39 +6048,37 @@ static int prepareSampleData() {
static void postFreeResource() {
tmfclose(g_fpOfInsertResult);
+
for (int i = 0; i < g_Dbs.dbCount; i++) {
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) {
- free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
+ tmfree(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL;
}
if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) {
- free(g_Dbs.db[i].superTbls[j].sampleDataBuf);
+ tmfree(g_Dbs.db[i].superTbls[j].sampleDataBuf);
g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL;
}
-#if STMT_IFACE_ENABLED == 1
- if (g_Dbs.db[i].superTbls[j].sampleBindArray) {
- for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) {
- uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)(
- g_Dbs.db[i].superTbls[j].sampleBindArray
- + sizeof(uintptr_t *) * k));
- for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) {
- TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c));
- if (bind)
- tmfree(bind->buffer);
- }
- tmfree((char *)tmp);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int c = 0;
+ c < g_Dbs.db[i].superTbls[j].columnCount; c ++) {
+
+ if (g_Dbs.db[i].superTbls[j].sampleBindBatchArray) {
+
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_Dbs.db[i].superTbls[j].sampleBindBatchArray
+ + sizeof(char*) * c)));
}
}
- tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray);
+ tmfree(g_Dbs.db[i].superTbls[j].sampleBindBatchArray);
#endif
-
if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) {
- free(g_Dbs.db[i].superTbls[j].tagDataBuf);
+ tmfree(g_Dbs.db[i].superTbls[j].tagDataBuf);
g_Dbs.db[i].superTbls[j].tagDataBuf = NULL;
}
if (0 != g_Dbs.db[i].superTbls[j].childTblName) {
- free(g_Dbs.db[i].superTbls[j].childTblName);
+ tmfree(g_Dbs.db[i].superTbls[j].childTblName);
g_Dbs.db[i].superTbls[j].childTblName = NULL;
}
}
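postFreeResource() now releases everything through tmfree() instead of bare free(). The helper itself is outside this diff; a minimal sketch under the assumption that it is a NULL-safe wrapper:

#include <stdlib.h>

// Assumed shape of the helper; not quoted from the source tree.
static void tmfree(char *buf) {
    if (NULL != buf) {
        free(buf);
    }
}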
@@ -5107,13 +6094,26 @@ static void postFreeResource() {
tmfree(g_rand_current_buff);
tmfree(g_rand_phase_buff);
+ tmfree(g_sampleDataBuf);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ for (int l = 0;
+ l < g_args.columnCount; l ++) {
+ if (g_sampleBindBatchArray) {
+ tmfree((char *)((uintptr_t)*(uintptr_t*)(
+ g_sampleBindBatchArray
+ + sizeof(char*) * l)));
+ }
+ }
+ tmfree(g_sampleBindBatchArray);
+#endif
}
static int getRowDataFromSample(
char* dataBuf, int64_t maxLen, int64_t timestamp,
SSuperTable* stbInfo, int64_t* sampleUsePos)
{
- if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
+ if ((*sampleUsePos) == MAX_SAMPLES) {
*sampleUsePos = 0;
}
@@ -5144,15 +6144,16 @@ static int64_t generateStbRowData(
int tmpLen;
dataLen += snprintf(pstr + dataLen, maxLen - dataLen,
- "(%" PRId64 ",", timestamp);
+ "(%" PRId64 "", timestamp);
for (int i = 0; i < stbInfo->columnCount; i++) {
- if ((0 == strncasecmp(stbInfo->columns[i].dataType,
- "BINARY", 6))
- || (0 == strncasecmp(stbInfo->columns[i].dataType,
- "NCHAR", 5))) {
+ tstrncpy(pstr + dataLen, ",", 2);
+ dataLen += 1;
+
+ if ((stbInfo->columns[i].data_type == TSDB_DATA_TYPE_BINARY)
+ || (stbInfo->columns[i].data_type == TSDB_DATA_TYPE_NCHAR)) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary or nchar length overflow, max size:%u\n",
+ errorPrint2("binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5164,83 +6165,95 @@ static int64_t generateStbRowData(
}
char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
if (NULL == buf) {
- errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+ errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf);
+ dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\'", buf);
tmfree(buf);
} else {
- char *tmp;
+ char *tmp = NULL;
+ switch(stbInfo->columns[i].data_type) {
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (i == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
+ break;
- if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "INT", 3)) {
- if ((g_args.demo_mode) && (i == 1)) {
- tmp = demo_voltage_int_str();
- } else {
- tmp = rand_int_str();
- }
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BIGINT", 6)) {
- tmp = rand_bigint_str();
- tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN);
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "FLOAT", 5)) {
- if (g_args.demo_mode) {
- if (i == 0) {
- tmp = demo_current_float_str();
+ case TSDB_DATA_TYPE_BIGINT:
+ tmp = rand_bigint_str();
+ tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (i == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
} else {
- tmp = demo_phase_float_str();
+ tmp = rand_float_str();
}
- } else {
- tmp = rand_float_str();
- }
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "DOUBLE", 6)) {
- tmp = rand_double_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "SMALLINT", 8)) {
- tmp = rand_smallint_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TINYINT", 7)) {
- tmp = rand_tinyint_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "BOOL", 4)) {
- tmp = rand_bool_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN));
- } else if (0 == strncasecmp(stbInfo->columns[i].dataType,
- "TIMESTAMP", 9)) {
- tmp = rand_int_str();
- tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN));
- } else {
- errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType);
- return -1;
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, FLOAT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmp = rand_double_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, DOUBLE_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ tmp = rand_smallint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp,
+ min(tmpLen + 1, SMALLINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ tmp = rand_tinyint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, TINYINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmp = rand_bool_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BOOL_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmp = rand_bigint_str();
+ tmpLen = strlen(tmp);
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, BIGINT_BUFF_LEN));
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("Not support data type: %s\n",
+ stbInfo->columns[i].dataType);
+ exit(EXIT_FAILURE);
}
- dataLen += strlen(tmp);
- tstrncpy(pstr + dataLen, ",", 2);
- dataLen += 1;
+ if (tmp) {
+ dataLen += strlen(tmp);
+ }
}
if (dataLen > (remainderBufLen - (128)))
return 0;
}
- dataLen -= 1;
- dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")");
+ tstrncpy(pstr + dataLen, ")", 2);
verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen);
verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf);
@@ -5248,53 +6261,83 @@ static int64_t generateStbRowData(
return strlen(recBuf);
}
-static int64_t generateData(char *recBuf, char **data_type,
+static int64_t generateData(char *recBuf, char *data_type,
int64_t timestamp, int lenOfBinary) {
memset(recBuf, 0, MAX_DATA_SIZE);
char *pstr = recBuf;
- pstr += sprintf(pstr, "(%" PRId64, timestamp);
+ pstr += sprintf(pstr, "(%"PRId64"", timestamp);
- int columnCount = g_args.num_of_CPR;
+ int columnCount = g_args.columnCount;
+ bool b;
+ char *s;
for (int i = 0; i < columnCount; i++) {
- if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_tinyint() );
- } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_smallint());
- } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) {
- pstr += sprintf(pstr, ",%d", rand_int());
- } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
- } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) {
- pstr += sprintf(pstr, ",%10.4f", rand_float());
- } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) {
- double t = rand_double();
- pstr += sprintf(pstr, ",%20.8f", t);
- } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) {
- bool b = rand_bool() & 1;
- pstr += sprintf(pstr, ",%s", b ? "true" : "false");
- } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) {
- char *s = malloc(lenOfBinary + 1);
- if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
- __func__, __LINE__, lenOfBinary + 1);
- exit(EXIT_FAILURE);
- }
- rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ",\"%s\"", s);
- free(s);
- } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) {
- char *s = malloc(lenOfBinary + 1);
- if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
- __func__, __LINE__, lenOfBinary + 1);
+ switch (data_type[i]) {
+ case TSDB_DATA_TYPE_TINYINT:
+ pstr += sprintf(pstr, ",%d", rand_tinyint() );
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pstr += sprintf(pstr, ",%d", rand_smallint());
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ pstr += sprintf(pstr, ",%d", rand_int());
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ pstr += sprintf(pstr, ",%10.4f", rand_float());
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pstr += sprintf(pstr, ",%20.8f", rand_double());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ b = rand_bool() & 1;
+ pstr += sprintf(pstr, ",%s", b ? "true" : "false");
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ s = malloc(lenOfBinary + 1);
+ if (s == NULL) {
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
+ __func__, __LINE__, lenOfBinary + 1);
+ exit(EXIT_FAILURE);
+ }
+ rand_string(s, lenOfBinary);
+ pstr += sprintf(pstr, ",\"%s\"", s);
+ free(s);
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %d\n",
+ __func__, __LINE__,
+ data_type[i]);
exit(EXIT_FAILURE);
- }
- rand_string(s, lenOfBinary);
- pstr += sprintf(pstr, ",\"%s\"", s);
- free(s);
}
if (strlen(recBuf) > MAX_DATA_SIZE) {
@@ -5309,26 +6352,165 @@ static int64_t generateData(char *recBuf, char **data_type,
return (int32_t)strlen(recBuf);
}
-static int prepareSampleDataForSTable(SSuperTable *stbInfo) {
- char* sampleDataBuf = NULL;
+static int generateSampleFromRand(
+ char *sampleDataBuf,
+ uint64_t lenOfOneRow,
+ int columnCount,
+ StrColumn *columns
+ )
+{
+ char data[MAX_DATA_SIZE];
+ memset(data, 0, MAX_DATA_SIZE);
+
+ char *buff = malloc(lenOfOneRow);
+ if (NULL == buff) {
+ errorPrint2("%s() LN%d, memory allocation %"PRIu64" bytes failed\n",
+ __func__, __LINE__, lenOfOneRow);
+ exit(EXIT_FAILURE);
+ }
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ uint64_t pos = 0;
+ memset(buff, 0, lenOfOneRow);
+
+ for (int c = 0; c < columnCount; c++) {
+ char *tmp = NULL;
+
+ uint32_t dataLen;
+ char data_type = (columns)?(columns[c].data_type):g_args.data_type[c];
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ dataLen = (columns)?columns[c].dataLen:g_args.binwidth;
+ rand_string(data, dataLen);
+ pos += sprintf(buff + pos, "%s,", data);
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if ((g_args.demo_mode) && (c == 1)) {
+ tmp = demo_voltage_int_str();
+ } else {
+ tmp = rand_int_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (g_args.demo_mode) {
+ if (c == 0) {
+ tmp = demo_current_float_str();
+ } else {
+ tmp = demo_phase_float_str();
+ }
+ } else {
+ tmp = rand_float_str();
+ }
+ pos += sprintf(buff + pos, "%s,", tmp);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ pos += sprintf(buff + pos, "%s,", rand_double_str());
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ pos += sprintf(buff + pos, "%s,", rand_smallint_str());
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ pos += sprintf(buff + pos, "%s,", rand_tinyint_str());
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ pos += sprintf(buff + pos, "%s,", rand_bool_str());
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pos += sprintf(buff + pos, "%s,", rand_bigint_str());
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("%s() LN%d, Unknown data type %s\n",
+ __func__, __LINE__,
+ (columns)?(columns[c].dataType):g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ *(buff + pos - 1) = 0;
+ memcpy(sampleDataBuf + i * lenOfOneRow, buff, pos);
+ }
+
+ free(buff);
+ return 0;
+}
+
+static int generateSampleFromRandForNtb()
+{
+ return generateSampleFromRand(
+ g_sampleDataBuf,
+ g_args.lenOfOneRow,
+ g_args.columnCount,
+ NULL);
+}
+
+static int generateSampleFromRandForStb(SSuperTable *stbInfo)
+{
+ return generateSampleFromRand(
+ stbInfo->sampleDataBuf,
+ stbInfo->lenOfOneRow,
+ stbInfo->columnCount,
+ stbInfo->columns);
+}
+
+static int prepareSampleForNtb() {
+ g_sampleDataBuf = calloc(g_args.lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == g_sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ __func__, __LINE__,
+ g_args.lenOfOneRow * MAX_SAMPLES,
+ strerror(errno));
+ return -1;
+ }
+
+ return generateSampleFromRandForNtb();
+}
+
+static int prepareSampleForStb(SSuperTable *stbInfo) {
- sampleDataBuf = calloc(
- stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
- if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ stbInfo->sampleDataBuf = calloc(
+ stbInfo->lenOfOneRow * MAX_SAMPLES, 1);
+ if (NULL == stbInfo->sampleDataBuf) {
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
- stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
+ stbInfo->lenOfOneRow * MAX_SAMPLES,
strerror(errno));
return -1;
}
- stbInfo->sampleDataBuf = sampleDataBuf;
- int ret = readSampleFromCsvFileToMem(stbInfo);
+ int ret;
+ if (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample"))) {
+ ret = generateSampleFromCsvForStb(stbInfo);
+ } else {
+ ret = generateSampleFromRandForStb(stbInfo);
+ }
if (0 != ret) {
- errorPrint("%s() LN%d, read sample from csv file failed.\n",
+ errorPrint2("%s() LN%d, read sample from csv file failed.\n",
__func__, __LINE__);
- tmfree(sampleDataBuf);
+ tmfree(stbInfo->sampleDataBuf);
stbInfo->sampleDataBuf = NULL;
return -1;
}
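A note on the buffer these helpers share: sampleDataBuf is one flat allocation of MAX_SAMPLES rows, each occupying a fixed lenOfOneRow slot and NUL-terminated inside it (see the memcpy in generateSampleFromRand above). Fetching row k is plain pointer arithmetic; an illustrative fragment, not code from this patch:

// Illustrative only: row k of a super table's pre-generated samples,
// wrapping the way sampleUsePos wraps at MAX_SAMPLES.
static const char *getSampleRow(SSuperTable *stbInfo, int64_t k) {
    return stbInfo->sampleDataBuf + (k % MAX_SAMPLES) * stbInfo->lenOfOneRow;
}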
@@ -5341,9 +6523,6 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
int32_t affectedRows;
SSuperTable* stbInfo = pThreadInfo->stbInfo;
- verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
- __func__, __LINE__, pThreadInfo->buffer);
-
uint16_t iface;
if (stbInfo)
iface = stbInfo->iface;
@@ -5361,12 +6540,18 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
switch(iface) {
case TAOSC_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
affectedRows = queryDbExec(
pThreadInfo->taos,
pThreadInfo->buffer, INSERT_TYPE, false);
break;
case REST_IFACE:
+ verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
+ __func__, __LINE__, pThreadInfo->buffer);
+
if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
pThreadInfo->buffer, pThreadInfo)) {
affectedRows = -1;
@@ -5377,12 +6562,11 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
}
break;
-#if STMT_IFACE_ENABLED == 1
case STMT_IFACE:
debugPrint("%s() LN%d, stmt=%p",
__func__, __LINE__, pThreadInfo->stmt);
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
- errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
+ errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
@@ -5390,10 +6574,9 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
}
affectedRows = k;
break;
-#endif
default:
- errorPrint("%s() LN%d: unknown insert mode: %d\n",
+ errorPrint2("%s() LN%d: unknown insert mode: %d\n",
__func__, __LINE__, stbInfo->iface);
affectedRows = 0;
}
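With the STMT_IFACE_ENABLED guards gone, the stmt path is always compiled in. Only taos_stmt_execute() and taos_stmt_errstr() appear in this patch; the surrounding calls below are assumed from the TDengine client's prepared-statement API, so treat this as a sketch of the flow execInsert() plugs into, not code from the file:

#include <stdio.h>
#include <taos.h>

// Sketch: one bound row through the assumed stmt lifecycle.
static int insertOneRowViaStmt(TAOS *taos, TAOS_BIND *binds) {
    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (NULL == stmt) return -1;
    if (0 != taos_stmt_prepare(stmt, "insert into t0 values(?,?)", 0)) goto fail;
    if (0 != taos_stmt_bind_param(stmt, binds)) goto fail;  // binds filled by prepareStmtBindArrayByType()
    if (0 != taos_stmt_add_batch(stmt)) goto fail;
    if (0 != taos_stmt_execute(stmt)) goto fail;
    taos_stmt_close(stmt);
    return 0;
fail:
    fprintf(stderr, "stmt error: %s\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return -1;
}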
@@ -5447,8 +6630,8 @@ static int32_t generateDataTailWithoutStb(
int64_t retLen = 0;
- char **data_type = g_args.datatype;
- int lenOfBinary = g_args.len_of_binary;
+ char *data_type = g_args.data_type;
+ int lenOfBinary = g_args.binwidth;
if (g_args.disorderRatio) {
retLen = generateData(data, data_type,
@@ -5621,7 +6804,7 @@ static int generateStbSQLHead(
tableSeq % stbInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -5633,7 +6816,7 @@ static int generateStbSQLHead(
dbName,
tableName,
dbName,
- stbInfo->sTblName,
+ stbInfo->stbName,
tagsValBuf);
tmfree(tagsValBuf);
} else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) {
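generateStbSQLHead() feeds five arguments (dbName, tableName, dbName, stbName, tagsValBuf) into a format string that sits above this hunk. A sketch of the auto-create insert head those arguments imply, assuming TDengine's "insert into ... using ... tags ..." syntax (hypothetical wrapper, illustrative only):

#include <stdio.h>

// Hypothetical: build the head of an auto-create insert statement.
static int buildAutoCreateHead(char *buf, size_t buflen,
        const char *dbName, const char *tableName,
        const char *stbName, const char *tagsValBuf) {
    return snprintf(buf, buflen,
            "insert into %s.%s using %s.%s tags %s values",
            dbName, tableName, dbName, stbName, tagsValBuf);
}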
@@ -5763,406 +6946,457 @@ static int64_t generateInterlaceDataWithoutStb(
return k;
}
-#if STMT_IFACE_ENABLED == 1
static int32_t prepareStmtBindArrayByType(
TAOS_BIND *bind,
- char *dataType, int32_t dataLen,
+ char data_type, int32_t dataLen,
int32_t timePrec,
char *value)
{
- if (0 == strncasecmp(dataType,
- "BINARY", strlen("BINARY"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_binary;
+ int32_t *bind_int;
+ int64_t *bind_bigint;
+ float *bind_float;
+ double *bind_double;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+ int16_t *bind_smallint;
+ int8_t *bind_tinyint;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary;
- bind->buffer_type = TSDB_DATA_TYPE_BINARY;
- if (value) {
- bind_binary = calloc(1, strlen(value) + 1);
- strncpy(bind_binary, value, strlen(value));
- bind->buffer_length = strlen(bind_binary);
- } else {
- bind_binary = calloc(1, dataLen + 1);
- rand_string(bind_binary, dataLen);
- bind->buffer_length = dataLen;
- }
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ bind_binary = calloc(1, strlen(value) + 1);
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ bind_binary = calloc(1, dataLen + 1);
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
- bind->length = &bind->buffer_length;
- bind->buffer = bind_binary;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "NCHAR", strlen("NCHAR"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
+ break;
- bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
- if (value) {
- bind_nchar = calloc(1, strlen(value) + 1);
- strncpy(bind_nchar, value, strlen(value));
- } else {
- bind_nchar = calloc(1, dataLen + 1);
- rand_string(bind_nchar, dataLen);
- }
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar;
- bind->buffer_length = strlen(bind_nchar);
- bind->buffer = bind_nchar;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "INT", strlen("INT"))) {
- int32_t *bind_int = malloc(sizeof(int32_t));
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ bind_nchar = calloc(1, strlen(value) + 1);
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ bind_nchar = calloc(1, dataLen + 1);
+ rand_string(bind_nchar, dataLen);
+ }
- if (value) {
- *bind_int = atoi(value);
- } else {
- *bind_int = rand_int();
- }
- bind->buffer_type = TSDB_DATA_TYPE_INT;
- bind->buffer_length = sizeof(int32_t);
- bind->buffer = bind_int;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "BIGINT", strlen("BIGINT"))) {
- int64_t *bind_bigint = malloc(sizeof(int64_t));
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_bigint = atoll(value);
- } else {
- *bind_bigint = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_bigint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "FLOAT", strlen("FLOAT"))) {
- float *bind_float = malloc(sizeof(float));
+ case TSDB_DATA_TYPE_INT:
+ bind_int = malloc(sizeof(int32_t));
+ assert(bind_int);
- if (value) {
- *bind_float = (float)atof(value);
- } else {
- *bind_float = rand_float();
- }
- bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
- bind->buffer_length = sizeof(float);
- bind->buffer = bind_float;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "DOUBLE", strlen("DOUBLE"))) {
- double *bind_double = malloc(sizeof(double));
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_double = atof(value);
- } else {
- *bind_double = rand_double();
- }
- bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
- bind->buffer_length = sizeof(double);
- bind->buffer = bind_double;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "SMALLINT", strlen("SMALLINT"))) {
- int16_t *bind_smallint = malloc(sizeof(int16_t));
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = malloc(sizeof(int64_t));
+ assert(bind_bigint);
- if (value) {
- *bind_smallint = (int16_t)atoi(value);
- } else {
- *bind_smallint = rand_smallint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
- bind->buffer_length = sizeof(int16_t);
- bind->buffer = bind_smallint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "TINYINT", strlen("TINYINT"))) {
- int8_t *bind_tinyint = malloc(sizeof(int8_t));
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- if (value) {
- *bind_tinyint = (int8_t)atoi(value);
- } else {
- *bind_tinyint = rand_tinyint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_tinyint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else if (0 == strncasecmp(dataType,
- "BOOL", strlen("BOOL"))) {
- int8_t *bind_bool = malloc(sizeof(int8_t));
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = malloc(sizeof(float));
+ assert(bind_float);
- if (value) {
- if (strncasecmp(value, "true", 4)) {
- *bind_bool = true;
+ if (value) {
+ *bind_float = (float)atof(value);
} else {
- *bind_bool = false;
+ *bind_float = rand_float();
}
- } else {
- *bind_bool = rand_bool();
- }
- bind->buffer_type = TSDB_DATA_TYPE_BOOL;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_bool;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
- } else if (0 == strncasecmp(dataType,
- "TIMESTAMP", strlen("TIMESTAMP"))) {
- int64_t *bind_ts2 = malloc(sizeof(int64_t));
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = malloc(sizeof(double));
+ assert(bind_double);
- if (value) {
- if (strchr(value, ':') && strchr(value, '-')) {
- int i = 0;
- while(value[i] != '\0') {
- if (value[i] == '\"' || value[i] == '\'') {
- value[i] = ' ';
- }
- i++;
- }
- int64_t tmpEpoch;
- if (TSDB_CODE_SUCCESS != taosParseTime(
- value, &tmpEpoch, strlen(value),
- timePrec, 0)) {
- errorPrint("Input %s, time format error!\n", value);
- return -1;
- }
- *bind_ts2 = tmpEpoch;
+ if (value) {
+ *bind_double = atof(value);
} else {
- *bind_ts2 = atoll(value);
+ *bind_double = rand_double();
}
- } else {
- *bind_ts2 = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_ts2;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else {
- errorPrint( "No support data type: %s\n", dataType);
- return -1;
- }
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = malloc(sizeof(int16_t));
+ assert(bind_smallint);
+
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = malloc(sizeof(int8_t));
+ assert(bind_tinyint);
+
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = malloc(sizeof(int8_t));
+ assert(bind_bool);
+
+ if (value) {
+ if (strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
+ }
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = malloc(sizeof(int64_t));
+ assert(bind_ts2);
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+ free(bind_ts2);
+ errorPrint2("Input %s, time format error!\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
+ }
+ } else {
+ *bind_ts2 = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ break;
+
+ case TSDB_DATA_TYPE_NULL:
+ break;
+
+ default:
+ errorPrint2("Not support data type: %d\n", data_type);
+ exit(EXIT_FAILURE);
+ }
return 0;
}
static int32_t prepareStmtBindArrayByTypeForRand(
TAOS_BIND *bind,
- char *dataType, int32_t dataLen,
+ char data_type, int32_t dataLen,
int32_t timePrec,
char **ptr,
char *value)
{
- if (0 == strncasecmp(dataType,
- "BINARY", strlen("BINARY"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_binary = (char *)*ptr;
+ int32_t *bind_int;
+ int64_t *bind_bigint;
+ float *bind_float;
+ double *bind_double;
+ int16_t *bind_smallint;
+ int8_t *bind_tinyint;
+ int8_t *bind_bool;
+ int64_t *bind_ts2;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
- bind->buffer_type = TSDB_DATA_TYPE_BINARY;
- if (value) {
- strncpy(bind_binary, value, strlen(value));
- bind->buffer_length = strlen(bind_binary);
- } else {
- rand_string(bind_binary, dataLen);
- bind->buffer_length = dataLen;
- }
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("binary length overflow, max size:%u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_binary = (char *)*ptr;
- bind->length = &bind->buffer_length;
- bind->buffer = bind_binary;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ if (value) {
+ strncpy(bind_binary, value, strlen(value));
+ bind->buffer_length = strlen(bind_binary);
+ } else {
+ rand_string(bind_binary, dataLen);
+ bind->buffer_length = dataLen;
+ }
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "NCHAR", strlen("NCHAR"))) {
- if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "nchar length overflow, max size:%u\n",
- (uint32_t)TSDB_MAX_BINARY_LEN);
- return -1;
- }
- char *bind_nchar = (char *)*ptr;
+ bind->length = &bind->buffer_length;
+ bind->buffer = bind_binary;
+ bind->is_null = NULL;
- bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
- if (value) {
- strncpy(bind_nchar, value, strlen(value));
- } else {
- rand_string(bind_nchar, dataLen);
- }
+ *ptr += bind->buffer_length;
+ break;
- bind->buffer_length = strlen(bind_nchar);
- bind->buffer = bind_nchar;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_NCHAR:
+ if (dataLen > TSDB_MAX_BINARY_LEN) {
+ errorPrint2("nchar length overflow, max size: %u\n",
+ (uint32_t)TSDB_MAX_BINARY_LEN);
+ return -1;
+ }
+ char *bind_nchar = (char *)*ptr;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "INT", strlen("INT"))) {
- int32_t *bind_int = (int32_t *)*ptr;
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ if (value) {
+ strncpy(bind_nchar, value, strlen(value));
+ } else {
+ rand_string(bind_nchar, dataLen);
+ }
- if (value) {
- *bind_int = atoi(value);
- } else {
- *bind_int = rand_int();
- }
- bind->buffer_type = TSDB_DATA_TYPE_INT;
- bind->buffer_length = sizeof(int32_t);
- bind->buffer = bind_int;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ bind->buffer_length = strlen(bind_nchar);
+ bind->buffer = bind_nchar;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "BIGINT", strlen("BIGINT"))) {
- int64_t *bind_bigint = (int64_t *)*ptr;
+ *ptr += bind->buffer_length;
+ break;
- if (value) {
- *bind_bigint = atoll(value);
- } else {
- *bind_bigint = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_bigint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_INT:
+ bind_int = (int32_t *)*ptr;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "FLOAT", strlen("FLOAT"))) {
- float *bind_float = (float *)*ptr;
+ if (value) {
+ *bind_int = atoi(value);
+ } else {
+ *bind_int = rand_int();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = bind_int;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- if (value) {
- *bind_float = (float)atof(value);
- } else {
- *bind_float = rand_float();
- }
- bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
- bind->buffer_length = sizeof(float);
- bind->buffer = bind_float;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ *ptr += bind->buffer_length;
+ break;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "DOUBLE", strlen("DOUBLE"))) {
- double *bind_double = (double *)*ptr;
+ case TSDB_DATA_TYPE_BIGINT:
+ bind_bigint = (int64_t *)*ptr;
- if (value) {
- *bind_double = atof(value);
- } else {
- *bind_double = rand_double();
- }
- bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
- bind->buffer_length = sizeof(double);
- bind->buffer = bind_double;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ if (value) {
+ *bind_bigint = atoll(value);
+ } else {
+ *bind_bigint = rand_bigint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_bigint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "SMALLINT", strlen("SMALLINT"))) {
- int16_t *bind_smallint = (int16_t *)*ptr;
+ *ptr += bind->buffer_length;
+ break;
- if (value) {
- *bind_smallint = (int16_t)atoi(value);
- } else {
- *bind_smallint = rand_smallint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
- bind->buffer_length = sizeof(int16_t);
- bind->buffer = bind_smallint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ case TSDB_DATA_TYPE_FLOAT:
+ bind_float = (float *)*ptr;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "TINYINT", strlen("TINYINT"))) {
- int8_t *bind_tinyint = (int8_t *)*ptr;
+ if (value) {
+ *bind_float = (float)atof(value);
+ } else {
+ *bind_float = rand_float();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = bind_float;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- if (value) {
- *bind_tinyint = (int8_t)atoi(value);
- } else {
- *bind_tinyint = rand_tinyint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_tinyint;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ *ptr += bind->buffer_length;
+ break;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "BOOL", strlen("BOOL"))) {
- int8_t *bind_bool = (int8_t *)*ptr;
+ case TSDB_DATA_TYPE_DOUBLE:
+ bind_double = (double *)*ptr;
- if (value) {
- if (strncasecmp(value, "true", 4)) {
- *bind_bool = true;
+ if (value) {
+ *bind_double = atof(value);
} else {
- *bind_bool = false;
+ *bind_double = rand_double();
}
- } else {
- *bind_bool = rand_bool();
- }
- bind->buffer_type = TSDB_DATA_TYPE_BOOL;
- bind->buffer_length = sizeof(int8_t);
- bind->buffer = bind_bool;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = bind_double;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- *ptr += bind->buffer_length;
- } else if (0 == strncasecmp(dataType,
- "TIMESTAMP", strlen("TIMESTAMP"))) {
- int64_t *bind_ts2 = (int64_t *)*ptr;
-
- if (value) {
- if (strchr(value, ':') && strchr(value, '-')) {
- int i = 0;
- while(value[i] != '\0') {
- if (value[i] == '\"' || value[i] == '\'') {
- value[i] = ' ';
- }
- i++;
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ bind_smallint = (int16_t *)*ptr;
+
+ if (value) {
+ *bind_smallint = (int16_t)atoi(value);
+ } else {
+ *bind_smallint = rand_smallint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = bind_smallint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ bind_tinyint = (int8_t *)*ptr;
+
+ if (value) {
+ *bind_tinyint = (int8_t)atoi(value);
+ } else {
+ *bind_tinyint = rand_tinyint();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_tinyint;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ bind_bool = (int8_t *)*ptr;
+
+ if (value) {
+                    // strncasecmp() returns 0 on a match, so compare against 0
+                    if (0 == strncasecmp(value, "true", 4)) {
+ *bind_bool = true;
+ } else {
+ *bind_bool = false;
}
- int64_t tmpEpoch;
- if (TSDB_CODE_SUCCESS != taosParseTime(
- value, &tmpEpoch, strlen(value),
- timePrec, 0)) {
- errorPrint("Input %s, time format error!\n", value);
- return -1;
+ } else {
+ *bind_bool = rand_bool();
+ }
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = bind_bool;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ *ptr += bind->buffer_length;
+ break;
+
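+        // a sample value may be a datetime string (contains both ':' and
+        // '-'), a bare epoch integer, or absent, in which case a random
+        // epoch value is generated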
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ bind_ts2 = (int64_t *)*ptr;
+
+ if (value) {
+ if (strchr(value, ':') && strchr(value, '-')) {
+ int i = 0;
+ while(value[i] != '\0') {
+ if (value[i] == '\"' || value[i] == '\'') {
+ value[i] = ' ';
+ }
+ i++;
+ }
+ int64_t tmpEpoch;
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ value, &tmpEpoch, strlen(value),
+ timePrec, 0)) {
+ errorPrint2("Input %s, time format error!\n", value);
+ return -1;
+ }
+ *bind_ts2 = tmpEpoch;
+ } else {
+ *bind_ts2 = atoll(value);
}
- *bind_ts2 = tmpEpoch;
} else {
- *bind_ts2 = atoll(value);
+ *bind_ts2 = rand_bigint();
}
- } else {
- *bind_ts2 = rand_bigint();
- }
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_ts2;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts2;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
- *ptr += bind->buffer_length;
- } else {
- errorPrint( "No support data type: %s\n", dataType);
- return -1;
+ *ptr += bind->buffer_length;
+ break;
+
+ default:
+            errorPrint2("Unsupported data type: %d\n", data_type);
+ return -1;
}
return 0;
@@ -6179,17 +7413,17 @@ static int32_t prepareStmtWithoutStb(
TAOS_STMT *stmt = pThreadInfo->stmt;
int ret = taos_stmt_set_tbname(stmt, tableName);
if (ret != 0) {
- errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
+ errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
tableName, ret, taos_stmt_errstr(stmt));
return ret;
}
- char **data_type = g_args.datatype;
+ char *data_type = g_args.data_type;
- char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1));
+ char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.columnCount + 1));
if (bindArray == NULL) {
- errorPrint("Failed to allocate %d bind params\n",
- (g_args.num_of_CPR + 1));
+ errorPrint2("Failed to allocate %d bind params\n",
+ (g_args.columnCount + 1));
return -1;
}
@@ -6216,26 +7450,27 @@ static int32_t prepareStmtWithoutStb(
bind->length = &bind->buffer_length;
bind->is_null = NULL;
- for (int i = 0; i < g_args.num_of_CPR; i ++) {
+ for (int i = 0; i < g_args.columnCount; i ++) {
bind = (TAOS_BIND *)((char *)bindArray
+ (sizeof(TAOS_BIND) * (i + 1)));
if ( -1 == prepareStmtBindArrayByType(
bind,
data_type[i],
- g_args.len_of_binary,
+ g_args.binwidth,
pThreadInfo->time_precision,
NULL)) {
+ free(bindArray);
return -1;
}
}
if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
// if msg > 3MB, break
if (0 != taos_stmt_add_batch(stmt)) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
@@ -6256,29 +7491,20 @@ static int32_t prepareStbStmtBindTag(
char *tagsVal,
int32_t timePrec)
{
- char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
- if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
- __func__, __LINE__, g_args.len_of_binary);
- return -1;
- }
-
TAOS_BIND *tag;
for (int t = 0; t < stbInfo->tagCount; t ++) {
tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t));
if ( -1 == prepareStmtBindArrayByType(
tag,
- stbInfo->tags[t].dataType,
+ stbInfo->tags[t].data_type,
stbInfo->tags[t].dataLen,
timePrec,
NULL)) {
- free(bindBuffer);
return -1;
}
}
- free(bindBuffer);
return 0;
}
@@ -6288,13 +7514,6 @@ static int32_t prepareStbStmtBindRand(
int64_t startTime, int32_t recSeq,
int32_t timePrec)
{
- char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
- if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
- __func__, __LINE__, g_args.len_of_binary);
- return -1;
- }
-
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
char *ptr = data;
@@ -6324,51 +7543,19 @@ static int32_t prepareStbStmtBindRand(
ptr += bind->buffer_length;
} else if ( -1 == prepareStmtBindArrayByTypeForRand(
bind,
- stbInfo->columns[i-1].dataType,
+ stbInfo->columns[i-1].data_type,
stbInfo->columns[i-1].dataLen,
timePrec,
&ptr,
NULL)) {
- tmfree(bindBuffer);
return -1;
}
}
- tmfree(bindBuffer);
- return 0;
-}
-
-static int32_t prepareStbStmtBindWithSample(
- int64_t *ts,
- char *bindArray, SSuperTable *stbInfo,
- int64_t startTime, int32_t recSeq,
- int32_t timePrec,
- int64_t samplePos)
-{
- TAOS_BIND *bind;
-
- bind = (TAOS_BIND *)bindArray;
-
- int64_t *bind_ts = ts;
-
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- if (stbInfo->disorderRatio) {
- *bind_ts = startTime + getTSRandTail(
- stbInfo->timeStampStep, recSeq,
- stbInfo->disorderRatio,
- stbInfo->disorderRange);
- } else {
- *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
- }
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = bind_ts;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
-
return 0;
}
-static int32_t prepareStbStmtRand(
+UNUSED_FUNC static int32_t prepareStbStmtRand(
threadInfo *pThreadInfo,
char *tableName,
int64_t tableSeq,
@@ -6393,7 +7580,7 @@ static int32_t prepareStbStmtRand(
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6401,7 +7588,7 @@ static int32_t prepareStbStmtRand(
char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
if (NULL == tagsArray) {
tmfree(tagsValBuf);
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6420,14 +7607,14 @@ static int32_t prepareStbStmtRand(
tmfree(tagsArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
} else {
ret = taos_stmt_set_tbname(stmt, tableName);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6435,7 +7622,7 @@ static int32_t prepareStbStmtRand(
char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
__func__, __LINE__, (stbInfo->columnCount + 1));
return -1;
}
@@ -6454,7 +7641,7 @@ static int32_t prepareStbStmtRand(
}
ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6462,7 +7649,7 @@ static int32_t prepareStbStmtRand(
// if msg > 3MB, break
ret = taos_stmt_add_batch(stmt);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6480,7 +7667,8 @@ static int32_t prepareStbStmtRand(
return k;
}
-static int32_t prepareStbStmtWithSample(
+#if STMT_BIND_PARAM_BATCH == 1
+static int execBindParamBatch(
threadInfo *pThreadInfo,
char *tableName,
int64_t tableSeq,
@@ -6491,85 +7679,611 @@ static int32_t prepareStbStmtWithSample(
int64_t *pSamplePos)
{
int ret;
- SSuperTable *stbInfo = pThreadInfo->stbInfo;
TAOS_STMT *stmt = pThreadInfo->stmt;
- if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
- char* tagsValBuf = NULL;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+    uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+
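+    // bind at most `batch` rows, but never more than the sample rows left
+    // before the cursor wraps at MAX_SAMPLES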
+ uint32_t thisBatch = MAX_SAMPLES - (*pSamplePos);
+
+ if (thisBatch > batch) {
+ thisBatch = batch;
+ }
+ verbosePrint("%s() LN%d, batch=%d pos=%"PRId64" thisBatch=%d\n",
+ __func__, __LINE__, batch, *pSamplePos, thisBatch);
+
+ memset(pThreadInfo->bindParams, 0,
+ (sizeof(TAOS_MULTI_BIND) * (columnCount + 1)));
+ memset(pThreadInfo->is_null, 0, thisBatch);
+
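+    // one TAOS_MULTI_BIND per data column plus one for the timestamp:
+    // slot 0 is the timestamp, so data column c maps to columns[c-1]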
+ for (int c = 0; c < columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+
+ char data_type;
+
+ if (c == 0) {
+ data_type = TSDB_DATA_TYPE_TIMESTAMP;
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = pThreadInfo->bind_ts_array;
- if (0 == stbInfo->tagSource) {
- tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
} else {
- tagsValBuf = getTagValueFromTagSample(
- stbInfo,
- tableSeq % stbInfo->tagSampleCount);
- }
+ data_type = (stbInfo)?stbInfo->columns[c-1].data_type:g_args.data_type[c-1];
- if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
- __func__, __LINE__);
- return -1;
- }
+ char *tmpP;
- char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
- if (NULL == tagsArray) {
- tmfree(tagsValBuf);
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
- __func__, __LINE__);
- return -1;
- }
+ switch(data_type) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ param->buffer_length =
+ ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth);
- if (-1 == prepareStbStmtBindTag(
- tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
- /* is tag */)) {
- tmfree(tagsValBuf);
- tmfree(tagsArray);
- return -1;
- }
+                    // fetch the column's pre-built sample array; fall back
+                    // to the global table when no super table is given, as
+                    // the other type cases below do
+                    tmpP =
+                        (char *)((uintptr_t)*(uintptr_t*)(((stbInfo)?
+                        stbInfo->sampleBindBatchArray:g_sampleBindBatchArray)
+                        +sizeof(char*)*(c-1)));
- ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
+ verbosePrint("%s() LN%d, tmpP=%p pos=%"PRId64" width=%d position=%"PRId64"\n",
+ __func__, __LINE__, tmpP, *pSamplePos,
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)),
+ (*pSamplePos) *
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)));
- tmfree(tagsValBuf);
- tmfree(tagsArray);
+ param->buffer = (void *)(tmpP + *pSamplePos *
+ (((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth))
+ );
+ break;
- if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
- __func__, __LINE__, taos_stmt_errstr(stmt));
- return -1;
+ case TSDB_DATA_TYPE_INT:
+ param->buffer_length = sizeof(int32_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int32_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(
+ stbInfo->sampleBindBatchArray
+ +sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen*(*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(
+ g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int8_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ param->buffer_length = sizeof(int16_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int16_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int64_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ param->buffer_length = sizeof(int8_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int8_t)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ param->buffer_length = sizeof(float);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(float)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ param->buffer_length = sizeof(double);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(double)*(*pSamplePos));
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ param->buffer_length = sizeof(int64_t);
+ param->buffer = (stbInfo)?
+ (void *)((uintptr_t)*(uintptr_t*)(stbInfo->sampleBindBatchArray+sizeof(char*)*(c-1))
+ + stbInfo->columns[c-1].dataLen * (*pSamplePos)):
+ (void *)((uintptr_t)*(uintptr_t*)(g_sampleBindBatchArray+sizeof(char*)*(c-1))
+ + sizeof(int64_t)*(*pSamplePos));
+ break;
+
+ default:
+                    errorPrint2("%s() LN%d, wrong data type: %d\n",
+ __func__,
+ __LINE__,
+ data_type);
+ exit(EXIT_FAILURE);
+
+ }
+ }
+
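+        // TAOS_MULTI_BIND carries a per-row length array: NCHAR rows use
+        // their actual string length, fixed-width types repeat buffer_length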
+ param->buffer_type = data_type;
+ param->length = malloc(sizeof(int32_t) * thisBatch);
+ assert(param->length);
+
+ for (int b = 0; b < thisBatch; b++) {
+ if (param->buffer_type == TSDB_DATA_TYPE_NCHAR) {
+ param->length[b] = strlen(
+ (char *)param->buffer + b *
+                        // data column c binds columns[c-1]; use that width as the row stride
+                        ((stbInfo)?stbInfo->columns[c-1].dataLen:g_args.binwidth)
+ );
+ } else {
+ param->length[b] = param->buffer_length;
+ }
+ }
+ param->is_null = pThreadInfo->is_null;
+ param->num = thisBatch;
+ }
+
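+    // fill the timestamp column row by row, optionally perturbing it to
+    // simulate out-of-order data, and advance the shared sample cursor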
+ uint32_t k;
+ for (k = 0; k < thisBatch;) {
+ /* columnCount + 1 (ts) */
+ if (stbInfo->disorderRatio) {
+ *(pThreadInfo->bind_ts_array + k) = startTime + getTSRandTail(
+ stbInfo->timeStampStep, k,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *(pThreadInfo->bind_ts_array + k) = startTime + stbInfo->timeStampStep * k;
+ }
+
+ debugPrint("%s() LN%d, k=%d ts=%"PRId64"\n",
+ __func__, __LINE__,
+ k, *(pThreadInfo->bind_ts_array +k));
+ k++;
+ recordFrom ++;
+
+ (*pSamplePos) ++;
+ if ((*pSamplePos) == MAX_SAMPLES) {
+ *pSamplePos = 0;
+ }
+
+ if (recordFrom >= insertRows) {
+ break;
}
+ }
+
+ ret = taos_stmt_bind_param_batch(stmt, (TAOS_MULTI_BIND *)pThreadInfo->bindParams);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+
+    // release the per-row length arrays allocated above; use the NULL-safe
+    // columnCount computed at the top instead of dereferencing stbInfo
+    for (int c = 0; c < columnCount + 1; c ++) {
+ TAOS_MULTI_BIND *param = (TAOS_MULTI_BIND *)(pThreadInfo->bindParams + sizeof(TAOS_MULTI_BIND) * c);
+ free(param->length);
+ }
+
+ // if msg > 3MB, break
+ ret = taos_stmt_add_batch(stmt);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ return k;
+}
+
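+// convert the comma-separated sample buffer into per-column arrays of the
+// columns' native types, so batch binding can hand the client library
+// contiguous, typed buffers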
+static int parseSamplefileToStmtBatch(
+ SSuperTable* stbInfo)
+{
+ // char *sampleDataBuf = (stbInfo)?
+ // stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleBindBatchArray = NULL;
+
+ if (stbInfo) {
+ stbInfo->sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = stbInfo->sampleBindBatchArray;
} else {
- ret = taos_stmt_set_tbname(stmt, tableName);
- if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
- __func__, __LINE__, taos_stmt_errstr(stmt));
+ g_sampleBindBatchArray = calloc(1, sizeof(uintptr_t *) * columnCount);
+ sampleBindBatchArray = g_sampleBindBatchArray;
+ }
+ assert(sampleBindBatchArray);
+
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?stbInfo->columns[c].data_type:g_args.data_type[c];
+
+ char *tmpP = NULL;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ tmpP = calloc(1, sizeof(int) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ tmpP = calloc(1, sizeof(int16_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ tmpP = calloc(1, sizeof(int8_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ tmpP = calloc(1, sizeof(float) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ tmpP = calloc(1, sizeof(double) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = calloc(1, MAX_SAMPLES *
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth)));
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ tmpP = calloc(1, sizeof(int64_t) * MAX_SAMPLES);
+ assert(tmpP);
+ *(uintptr_t*)(sampleBindBatchArray+ sizeof(uintptr_t*)*c) = (uintptr_t)tmpP;
+ break;
+
+ default:
+ errorPrint("Unknown data type: %s\n",
+ (stbInfo)?stbInfo->columns[c].dataType:g_args.dataType[c]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
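+    // second pass: split each sample row on ',' and store every field into
+    // the matching column array at row index i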
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount; c++) {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c].data_type:
+ g_args.data_type[c];
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *tmpStr = calloc(1, index + 1);
+ if (NULL == tmpStr) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+ __func__, __LINE__, index + 1);
+ return -1;
+ }
+
+ strncpy(tmpStr, restStr, index);
+ cursor += index + 1; // skip ',' too
+ char *tmpP;
+
+ switch(data_type) {
+ case TSDB_DATA_TYPE_INT:
+ *((int32_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int32_t)*i)) =
+ atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ *(float*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(float)*i)) =
+ (float)atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ *(double*)(((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(double)*i)) =
+ atof(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ *((int16_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int16_t)*i)) =
+ (int16_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BOOL:
+ *((int8_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int8_t)*i)) =
+ (int8_t)atoi(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ *((int64_t*)((uintptr_t)*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c)+sizeof(int64_t)*i)) =
+ (int64_t)atol(tmpStr);
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ tmpP = (char *)(*(uintptr_t*)(sampleBindBatchArray
+ +sizeof(char*)*c));
+ strcpy(tmpP + i*
+ (((stbInfo)?stbInfo->columns[c].dataLen:g_args.binwidth))
+ , tmpStr);
+ break;
+
+ default:
+ break;
+ }
+
+ free(tmpStr);
+ }
+ }
+
+ return 0;
+}
+
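+// allocate the per-thread staging buffers used by execBindParamBatch(): a
+// timestamp array sized to the batch, one TAOS_MULTI_BIND per column plus
+// the timestamp, and a per-row is_null map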
+static int parseSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ uint32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+
+ pThreadInfo->bind_ts_array = malloc(sizeof(int64_t) * batch);
+ assert(pThreadInfo->bind_ts_array);
+
+ pThreadInfo->bindParams = malloc(sizeof(TAOS_MULTI_BIND) * (columnCount + 1));
+ assert(pThreadInfo->bindParams);
+
+ pThreadInfo->is_null = malloc(batch);
+ assert(pThreadInfo->is_null);
+
+ return 0;
+}
+
+static int parseStbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo,
+ uint32_t timePrec,
+ uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+}
+
+static int parseNtbSampleToStmtBatchForThread(
+ threadInfo *pThreadInfo, uint32_t timePrec, uint32_t batch)
+{
+ return parseSampleToStmtBatchForThread(
+ pThreadInfo, NULL, timePrec, batch);
+}
+
+#else
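+// non-batch fallback: pre-bind every sample row once into TAOS_BIND arrays
+// that execBindParam() replays, re-binding only the timestamp per row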
+static int parseSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ pThreadInfo->sampleBindArray =
+ (char *)calloc(1, sizeof(char *) * MAX_SAMPLES);
+ if (pThreadInfo->sampleBindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
+ __func__, __LINE__,
+ (uint64_t)sizeof(char *) * MAX_SAMPLES);
+ return -1;
+ }
+
+ int32_t columnCount = (stbInfo)?stbInfo->columnCount:g_args.columnCount;
+ char *sampleDataBuf = (stbInfo)?stbInfo->sampleDataBuf:g_sampleDataBuf;
+ int64_t lenOfOneRow = (stbInfo)?stbInfo->lenOfOneRow:g_args.lenOfOneRow;
+
+ for (int i=0; i < MAX_SAMPLES; i++) {
+ char *bindArray =
+ calloc(1, sizeof(TAOS_BIND) * (columnCount + 1));
+ if (bindArray == NULL) {
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
+ __func__, __LINE__, (columnCount + 1));
return -1;
}
+
+ TAOS_BIND *bind;
+ int cursor = 0;
+
+ for (int c = 0; c < columnCount + 1; c++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c));
+
+ if (c == 0) {
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = NULL; //bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ } else {
+ char data_type = (stbInfo)?
+ stbInfo->columns[c-1].data_type:
+ g_args.data_type[c-1];
+ int32_t dataLen = (stbInfo)?
+ stbInfo->columns[c-1].dataLen:
+ g_args.binwidth;
+ char *restStr = sampleDataBuf
+ + lenOfOneRow * i + cursor;
+ int lengthOfRest = strlen(restStr);
+
+ int index = 0;
+ for (index = 0; index < lengthOfRest; index ++) {
+ if (restStr[index] == ',') {
+ break;
+ }
+ }
+
+ char *bindBuffer = calloc(1, index + 1);
+                if (bindBuffer == NULL) {
+                    errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
+                        __func__, __LINE__, index + 1);
+                    free(bindArray);
+                    return -1;
+                }
+
+ strncpy(bindBuffer, restStr, index);
+ cursor += index + 1; // skip ',' too
+
+ if (-1 == prepareStmtBindArrayByType(
+ bind,
+ data_type,
+ dataLen,
+ timePrec,
+ bindBuffer)) {
+ free(bindBuffer);
+ free(bindArray);
+ return -1;
+ }
+ free(bindBuffer);
+ }
+ }
+ *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) =
+ (uintptr_t)bindArray;
}
+ return 0;
+}
+
+static int parseStbSampleToStmt(
+ threadInfo *pThreadInfo,
+ SSuperTable *stbInfo, uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ stbInfo, timePrec);
+}
+
+static int parseNtbSampleToStmt(
+ threadInfo *pThreadInfo,
+ uint32_t timePrec)
+{
+ return parseSampleToStmt(
+ pThreadInfo,
+ NULL,
+ timePrec);
+}
+
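+// bind only the timestamp slot of a pre-parsed sample row; the remaining
+// columns were bound once by parseSampleToStmt() and are reused as-is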
+static int32_t prepareStbStmtBindStartTime(
+ char *tableName,
+ int64_t *ts,
+ char *bindArray, SSuperTable *stbInfo,
+ int64_t startTime, int32_t recSeq)
+{
+ TAOS_BIND *bind;
+
+ bind = (TAOS_BIND *)bindArray;
+
+ int64_t *bind_ts = ts;
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ if (stbInfo->disorderRatio) {
+ *bind_ts = startTime + getTSRandTail(
+ stbInfo->timeStampStep, recSeq,
+ stbInfo->disorderRatio,
+ stbInfo->disorderRange);
+ } else {
+ *bind_ts = startTime + stbInfo->timeStampStep * recSeq;
+ }
+
+ verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n",
+ __func__, __LINE__, tableName, *bind_ts);
+
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = bind_ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+
+ return 0;
+}
+
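+// row-at-a-time variant used when STMT_BIND_PARAM_BATCH is disabled: bind
+// one pre-parsed sample row per iteration and accumulate the request with
+// taos_stmt_add_batch()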
+static uint32_t execBindParam(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
uint32_t k;
for (k = 0; k < batch;) {
char *bindArray = (char *)(*((uintptr_t *)
- (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos))));
+ (pThreadInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos))));
/* columnCount + 1 (ts) */
- if (-1 == prepareStbStmtBindWithSample(
+ if (-1 == prepareStbStmtBindStartTime(
+ tableName,
pThreadInfo->bind_ts,
bindArray, stbInfo,
- startTime, k,
- pThreadInfo->time_precision,
- *pSamplePos
+ startTime, k
/* is column */)) {
return -1;
}
ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
// if msg > 3MB, break
ret = taos_stmt_add_batch(stmt);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6578,7 +8292,7 @@ static int32_t prepareStbStmtWithSample(
recordFrom ++;
(*pSamplePos) ++;
- if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) {
+ if ((*pSamplePos) == MAX_SAMPLES) {
*pSamplePos = 0;
}
@@ -6591,147 +8305,671 @@ static int32_t prepareStbStmtWithSample(
}
#endif
-static int32_t generateStbProgressiveData(
- SSuperTable *stbInfo,
- char *tableName,
- int64_t tableSeq,
- char *dbName, char *buffer,
- int64_t insertRows,
- uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos,
- int64_t *pRemainderBufLen)
-{
- assert(buffer != NULL);
- char *pstr = buffer;
+static int32_t prepareStbStmt(
+ threadInfo *pThreadInfo,
+ char *tableName,
+ int64_t tableSeq,
+ uint32_t batch,
+ uint64_t insertRows,
+ uint64_t recordFrom,
+ int64_t startTime,
+ int64_t *pSamplePos)
+{
+ int ret;
+ SSuperTable *stbInfo = pThreadInfo->stbInfo;
+ TAOS_STMT *stmt = pThreadInfo->stmt;
+
+ if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) {
+ char* tagsValBuf = NULL;
+
+ if (0 == stbInfo->tagSource) {
+ tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq);
+ } else {
+ tagsValBuf = getTagValueFromTagSample(
+ stbInfo,
+ tableSeq % stbInfo->tagSampleCount);
+ }
+
+ if (NULL == tagsValBuf) {
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
+ if (NULL == tagsArray) {
+ tmfree(tagsValBuf);
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ if (-1 == prepareStbStmtBindTag(
+ tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision
+ /* is tag */)) {
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+ return -1;
+ }
+
+ ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray);
+
+ tmfree(tagsValBuf);
+ tmfree(tagsArray);
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ } else {
+ ret = taos_stmt_set_tbname(stmt, tableName);
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ return -1;
+ }
+ }
+
+#if STMT_BIND_PARAM_BATCH == 1
+ return execBindParamBatch(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#else
+ return execBindParam(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows,
+ recordFrom,
+ startTime,
+ pSamplePos);
+#endif
+}
+
+static int32_t generateStbProgressiveData(
+ SSuperTable *stbInfo,
+ char *tableName,
+ int64_t tableSeq,
+ char *dbName, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos,
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(pstr, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateStbSQLHead(
+ stbInfo,
+ tableName, tableSeq, dbName,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateStbDataTail(stbInfo,
+ g_args.reqPerReq, pstr, *pRemainderBufLen,
+ insertRows, recordFrom,
+ startTime,
+ pSamplePos, &dataLen);
+}
+
+static int32_t generateProgressiveDataWithoutStb(
+ char *tableName,
+ /* int64_t tableSeq, */
+ threadInfo *pThreadInfo, char *buffer,
+ int64_t insertRows,
+ uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
+ int64_t *pRemainderBufLen)
+{
+ assert(buffer != NULL);
+ char *pstr = buffer;
+
+ memset(buffer, 0, *pRemainderBufLen);
+
+ int64_t headLen = generateSQLHeadWithoutStb(
+ tableName, pThreadInfo->db_name,
+ buffer, *pRemainderBufLen);
+
+ if (headLen <= 0) {
+ return 0;
+ }
+ pstr += headLen;
+ *pRemainderBufLen -= headLen;
+
+ int64_t dataLen;
+
+ return generateDataTailWithoutStb(
+ g_args.reqPerReq, pstr, *pRemainderBufLen, insertRows, recordFrom,
+ startTime,
+ /*pSamplePos, */&dataLen);
+}
+
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ if (0 == pThreadInfo->totalDelay)
+ pThreadInfo->totalDelay = 1;
+
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0))
+ );
+}
+
+#if STMT_BIND_PARAM_BATCH == 1
+// stmt sync write interlace data
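+// interlace mode rotates through this thread's tables, writing up to
+// interlaceRows rows per table per round, so the tables' timestamps
+// advance together instead of one table finishing first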
+static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ int64_t timeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ timeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ timeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+ uint64_t timesInterlace = (insertRows / interlaceRows) + 1;
+ uint32_t precalcBatch = interlaceRows;
+
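+    // a single request carries at most reqPerReq rows and the sample
+    // buffers hold only MAX_SAMPLES rows, so clamp the batch to both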
+ if (precalcBatch > g_args.reqPerReq)
+ precalcBatch = g_args.reqPerReq;
+
+ if (precalcBatch > MAX_SAMPLES)
+ precalcBatch = MAX_SAMPLES;
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime;
+
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+ pThreadInfo->samplePos = 0;
+
+ for (int64_t interlace = 0;
+ interlace < timesInterlace; interlace ++) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ int64_t generated = 0;
+ int64_t samplePos;
+
+ for (; tableSeq < pThreadInfo->start_table_from + pThreadInfo->ntables; tableSeq ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ samplePos = pThreadInfo->samplePos;
+ startTime = pThreadInfo->start_time
+ + interlace * interlaceRows * timeStampStep;
+ uint64_t remainRecPerTbl =
+ insertRows - interlaceRows * interlace;
+ uint64_t recPerTbl = 0;
+
+ uint64_t remainPerInterlace;
+ if (remainRecPerTbl > interlaceRows) {
+ remainPerInterlace = interlaceRows;
+ } else {
+ remainPerInterlace = remainRecPerTbl;
+ }
+
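+            // drain this table's share of the round in chunks of at most
+            // precalcBatch rows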
+ while(remainPerInterlace > 0) {
+
+ uint32_t batch;
+ if (remainPerInterlace > precalcBatch) {
+ batch = precalcBatch;
+ } else {
+ batch = remainPerInterlace;
+ }
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batch, startTime);
+
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batch,
+ insertRows, 0,
+ startTime,
+ &samplePos);
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batch,
+ insertRows,
+ interlaceRows * interlace + recPerTbl,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ recPerTbl += generated;
+ remainPerInterlace -= generated;
+ pThreadInfo->totalInsertRows += generated;
+
+ verbosePrint("[%d] %s() LN%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->totalInsertRows);
+
+ startTs = taosGetTimestampUs();
+
+ int64_t affectedRows = execInsert(pThreadInfo, generated);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
+
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
+
+ if (generated != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert() insert %"PRId64", affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generated, affectedRows);
+ goto free_of_interlace_stmt;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ startTime += (generated * timeStampStep);
+ }
+ }
+ pThreadInfo->samplePos = samplePos;
+
+ if (tableSeq == pThreadInfo->start_table_from
+ + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+
+ flagSleep = true;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+#else
+// stmt sync write interlace data
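+// row-bind variant of the interlace writer, used when batch binding is
+// compiled out; each request groups batchPerTbl rows from up to
+// batchPerTblTimes tables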
+static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) {
+ debugPrint("[%d] %s() LN%d: ### stmt interlace write\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+
+ int64_t insertRows;
+ uint64_t maxSqlLen;
+ int64_t timeStampStep;
+ uint64_t insert_interval;
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+
+ if (stbInfo) {
+ insertRows = stbInfo->insertRows;
+ maxSqlLen = stbInfo->maxSqlLen;
+ timeStampStep = stbInfo->timeStampStep;
+ insert_interval = stbInfo->insertInterval;
+ } else {
+ insertRows = g_args.insertRows;
+ maxSqlLen = g_args.max_sql_len;
+ timeStampStep = g_args.timestamp_step;
+ insert_interval = g_args.insert_interval;
+ }
+
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ pThreadInfo->start_table_from,
+ pThreadInfo->ntables, insertRows);
+
+ uint32_t batchPerTbl = interlaceRows;
+ uint32_t batchPerTblTimes;
+
+ if (interlaceRows > g_args.reqPerReq)
+ interlaceRows = g_args.reqPerReq;
+
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ g_args.reqPerReq / interlaceRows;
+ } else {
+ batchPerTblTimes = 1;
+ }
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ uint64_t tableSeq = pThreadInfo->start_table_from;
+ int64_t startTime = pThreadInfo->start_time;
+
+ uint64_t generatedRecPerTbl = 0;
+ bool flagSleep = true;
+ uint64_t sleepTimeTotal = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
+ if ((flagSleep) && (insert_interval)) {
+ st = taosGetTimestampMs();
+ flagSleep = false;
+ }
+
+ uint32_t recOfBatch = 0;
+
+ int32_t generated;
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+
+ getTableName(tableName, pThreadInfo, tableSeq);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__,
+ tableName, batchPerTbl, startTime);
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ batchPerTbl,
+ insertRows, 0,
+ startTime,
+ &(pThreadInfo->samplePos));
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ batchPerTbl,
+ insertRows, i,
+ startTime);
+ }
+
+ debugPrint("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ if (generated < 0) {
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
+ goto free_of_interlace_stmt;
+ } else if (generated == 0) {
+ break;
+ }
+
+ tableSeq ++;
+ recOfBatch += batchPerTbl;
+
+ pThreadInfo->totalInsertRows += batchPerTbl;
+
+ verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl, recOfBatch);
+
+ if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
+ // turn to first table
+ tableSeq = pThreadInfo->start_table_from;
+ generatedRecPerTbl += batchPerTbl;
+
+ startTime = pThreadInfo->start_time
+ + generatedRecPerTbl * timeStampStep;
+
+ flagSleep = true;
+ if (generatedRecPerTbl >= insertRows)
+ break;
+
+ int64_t remainRows = insertRows - generatedRecPerTbl;
+ if ((remainRows > 0) && (batchPerTbl > remainRows))
+ batchPerTbl = remainRows;
+
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
+ break;
+ }
+
+ verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ generatedRecPerTbl, insertRows);
+
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
+ break;
+ }
- memset(pstr, 0, *pRemainderBufLen);
+ verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
+ pThreadInfo->totalInsertRows);
- int64_t headLen = generateStbSQLHead(
- stbInfo,
- tableName, tableSeq, dbName,
- buffer, *pRemainderBufLen);
+ startTs = taosGetTimestampUs();
- if (headLen <= 0) {
- return 0;
- }
- pstr += headLen;
- *pRemainderBufLen -= headLen;
+ if (recOfBatch == 0) {
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ batchPerTbl);
+ if (batchPerTbl > 0) {
+                errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less than %"PRId64"\n",
+ batchPerTbl, maxSqlLen / batchPerTbl);
+ }
+ goto free_of_interlace_stmt;
+ }
+ int64_t affectedRows = execInsert(pThreadInfo, recOfBatch);
- int64_t dataLen;
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay / 1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
- return generateStbDataTail(stbInfo,
- g_args.num_of_RPR, pstr, *pRemainderBufLen,
- insertRows, recordFrom,
- startTime,
- pSamplePos, &dataLen);
-}
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
-static int32_t generateProgressiveDataWithoutStb(
- char *tableName,
- /* int64_t tableSeq, */
- threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
- uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */
- int64_t *pRemainderBufLen)
-{
- assert(buffer != NULL);
- char *pstr = buffer;
+ if (recOfBatch != affectedRows) {
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n",
+ pThreadInfo->threadID, __func__, __LINE__,
+ recOfBatch, affectedRows);
+ goto free_of_interlace_stmt;
+ }
- memset(buffer, 0, *pRemainderBufLen);
+ pThreadInfo->totalAffectedRows += affectedRows;
- int64_t headLen = generateSQLHeadWithoutStb(
- tableName, pThreadInfo->db_name,
- buffer, *pRemainderBufLen);
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
- if (headLen <= 0) {
- return 0;
- }
- pstr += headLen;
- *pRemainderBufLen -= headLen;
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
- int64_t dataLen;
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
- return generateDataTailWithoutStb(
- g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom,
- startTime,
- /*pSamplePos, */&dataLen);
+free_of_interlace_stmt:
+ printStatPerThread(pThreadInfo);
+ return NULL;
}
-static void printStatPerThread(threadInfo *pThreadInfo)
-{
- fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows,
- (pThreadInfo->totalDelay)?
- (double)(pThreadInfo->totalAffectedRows/((double)pThreadInfo->totalDelay/1000000.0)):
- FLT_MAX);
-}
+#endif
// sync write interlace data
-static void* syncWriteInterlace(threadInfo *pThreadInfo) {
+static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
int64_t insertRows;
- uint32_t interlaceRows;
uint64_t maxSqlLen;
- int64_t nTimeStampStep;
+ int64_t timeStampStep;
uint64_t insert_interval;
- bool sourceRand;
-
SSuperTable* stbInfo = pThreadInfo->stbInfo;
if (stbInfo) {
insertRows = stbInfo->insertRows;
-
- if ((stbInfo->interlaceRows == 0)
- && (g_args.interlace_rows > 0)) {
- interlaceRows = g_args.interlace_rows;
- } else {
- interlaceRows = stbInfo->interlaceRows;
- }
maxSqlLen = stbInfo->maxSqlLen;
- nTimeStampStep = stbInfo->timeStampStep;
+ timeStampStep = stbInfo->timeStampStep;
insert_interval = stbInfo->insertInterval;
- if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) {
- sourceRand = true;
- } else {
- sourceRand = false; // from sample data file
- }
} else {
- insertRows = g_args.num_of_DPT;
- interlaceRows = g_args.interlace_rows;
+ insertRows = g_args.insertRows;
maxSqlLen = g_args.max_sql_len;
- nTimeStampStep = g_args.timestamp_step;
+ timeStampStep = g_args.timestamp_step;
insert_interval = g_args.insert_interval;
- sourceRand = true;
}
debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
-
- if (interlaceRows > insertRows)
- interlaceRows = insertRows;
-
- if (interlaceRows > g_args.num_of_RPR)
- interlaceRows = g_args.num_of_RPR;
+#if 1
+ if (interlaceRows > g_args.reqPerReq)
+ interlaceRows = g_args.reqPerReq;
uint32_t batchPerTbl = interlaceRows;
uint32_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
- g_args.num_of_RPR / interlaceRows;
+ g_args.reqPerReq / interlaceRows;
} else {
batchPerTblTimes = 1;
}
+#else
+ uint32_t batchPerTbl;
+ if (interlaceRows > g_args.reqPerReq)
+ batchPerTbl = g_args.reqPerReq;
+ else
+ batchPerTbl = interlaceRows;
+
+ uint32_t batchPerTblTimes;
+ if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
+ batchPerTblTimes =
+ interlaceRows / batchPerTbl;
+ } else {
+ batchPerTblTimes = 1;
+ }
+#endif
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -6761,6 +8999,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
st = taosGetTimestampMs();
flagSleep = false;
}
+
// generate data
memset(pThreadInfo->buffer, 0, maxSqlLen);
uint64_t remainderBufLen = maxSqlLen;
@@ -6774,12 +9013,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint32_t recOfBatch = 0;
+ int32_t generated;
for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
char tableName[TSDB_TABLE_NAME_LEN];
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -6787,74 +9027,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint64_t oldRemainderLen = remainderBufLen;
- int32_t generated;
if (stbInfo) {
- if (stbInfo->iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
- if (sourceRand) {
- generated = prepareStbStmtRand(
- pThreadInfo,
- tableName,
- tableSeq,
- batchPerTbl,
- insertRows, 0,
- startTime
- );
- } else {
- generated = prepareStbStmtWithSample(
- pThreadInfo,
- tableName,
- tableSeq,
- batchPerTbl,
- insertRows, 0,
- startTime,
- &(pThreadInfo->samplePos));
- }
-#else
- generated = -1;
-#endif
- } else {
- generated = generateStbInterlaceData(
- pThreadInfo,
- tableName, batchPerTbl, i,
- batchPerTblTimes,
- tableSeq,
- pstr,
- insertRows,
- startTime,
- &remainderBufLen);
- }
+ generated = generateStbInterlaceData(
+ pThreadInfo,
+ tableName, batchPerTbl, i,
+ batchPerTblTimes,
+ tableSeq,
+ pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
} else {
- if (g_args.iface == STMT_IFACE) {
- debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n",
- pThreadInfo->threadID,
- __func__, __LINE__,
- tableName, batchPerTbl, startTime);
-#if STMT_IFACE_ENABLED == 1
- generated = prepareStmtWithoutStb(
- pThreadInfo,
- tableName,
- batchPerTbl,
- insertRows, i,
- startTime);
-#else
- generated = -1;
-#endif
- } else {
- generated = generateInterlaceDataWithoutStb(
- tableName, batchPerTbl,
- tableSeq,
- pThreadInfo->db_name, pstr,
- insertRows,
- startTime,
- &remainderBufLen);
- }
+ generated = generateInterlaceDataWithoutStb(
+ tableName, batchPerTbl,
+ tableSeq,
+ pThreadInfo->db_name, pstr,
+ insertRows,
+ startTime,
+ &remainderBufLen);
}
debugPrint("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
- errorPrint("[%d] %s() LN%d, generated records is %d\n",
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
goto free_of_interlace;
} else if (generated == 0) {
@@ -6877,7 +9073,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
generatedRecPerTbl += batchPerTbl;
startTime = pThreadInfo->start_time
- + generatedRecPerTbl * nTimeStampStep;
+ + generatedRecPerTbl * timeStampStep;
flagSleep = true;
if (generatedRecPerTbl >= insertRows)
@@ -6887,7 +9083,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if ((remainRows > 0) && (batchPerTbl > remainRows))
batchPerTbl = remainRows;
- if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
+ if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq)
break;
}
@@ -6895,7 +9091,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__,
generatedRecPerTbl, insertRows);
- if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl)
+ if ((g_args.reqPerReq - recOfBatch) < batchPerTbl)
break;
}
@@ -6908,7 +9104,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampUs();
if (recOfBatch == 0) {
- errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl);
if (batchPerTbl > 0) {
@@ -6935,49 +9131,187 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (recOfBatch != affectedRows) {
- errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, pThreadInfo->buffer);
goto free_of_interlace;
}
- pThreadInfo->totalAffectedRows += affectedRows;
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
+ }
+
+ if ((insert_interval) && flagSleep) {
+ et = taosGetTimestampMs();
+
+ if (insert_interval > (et - st) ) {
+ uint64_t sleepTime = insert_interval - (et -st);
+ performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
+ __func__, __LINE__, sleepTime);
+ taosMsleep(sleepTime); // ms
+ sleepTimeTotal += insert_interval;
+ }
+ }
+ }
+ if (percentComplete < 100)
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+
+free_of_interlace:
+ tmfree(pThreadInfo->buffer);
+ printStatPerThread(pThreadInfo);
+ return NULL;
+}
+
+static void* syncWriteProgressiveStmt(threadInfo *pThreadInfo) {
+ debugPrint("%s() LN%d: ### stmt progressive write\n", __func__, __LINE__);
+
+ SSuperTable* stbInfo = pThreadInfo->stbInfo;
+ int64_t timeStampStep =
+ stbInfo?stbInfo->timeStampStep:g_args.timestamp_step;
+ int64_t insertRows =
+ (stbInfo)?stbInfo->insertRows:g_args.insertRows;
+ verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
+ __func__, __LINE__, insertRows);
+
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
+
+ pThreadInfo->totalInsertRows = 0;
+ pThreadInfo->totalAffectedRows = 0;
+
+ pThreadInfo->samplePos = 0;
+
+ int percentComplete = 0;
+ int64_t totalRows = insertRows * pThreadInfo->ntables;
+
+ for (uint64_t tableSeq = pThreadInfo->start_table_from;
+ tableSeq <= pThreadInfo->end_table_to;
+ tableSeq ++) {
+ int64_t start_time = pThreadInfo->start_time;
+
+ for (uint64_t i = 0; i < insertRows;) {
+ char tableName[TSDB_TABLE_NAME_LEN];
+ getTableName(tableName, pThreadInfo, tableSeq);
+ verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n",
+ __func__, __LINE__,
+ pThreadInfo->threadID, tableSeq, tableName);
+ if (0 == strlen(tableName)) {
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
+ pThreadInfo->threadID, __func__, __LINE__);
+ return NULL;
+ }
+
+ // measure prepare + insert
+ startTs = taosGetTimestampUs();
+
+ int32_t generated;
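+            // cap rows per request at the table's total insert rows so a single batch cannot overshoot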
+ if (stbInfo) {
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ (g_args.reqPerReq>stbInfo->insertRows)?
+ stbInfo->insertRows:
+ g_args.reqPerReq,
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos));
+ } else {
+ generated = prepareStmtWithoutStb(
+ pThreadInfo,
+ tableName,
+ g_args.reqPerReq,
+ insertRows, i,
+ start_time);
+ }
+
+ verbosePrint("[%d] %s() LN%d generated=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, generated);
+
+ if (generated > 0)
+ i += generated;
+ else
+ goto free_of_stmt_progressive;
+
+ start_time += generated * timeStampStep;
+ pThreadInfo->totalInsertRows += generated;
+
+ // only measure insert
+ // startTs = taosGetTimestampUs();
- int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
- if (currentPercent > percentComplete ) {
- printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
- percentComplete = currentPercent;
- }
- int64_t currentPrintTime = taosGetTimestampMs();
- if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
+ int32_t affectedRows = execInsert(pThreadInfo, generated);
+
+ endTs = taosGetTimestampUs();
+ uint64_t delay = endTs - startTs;
+            performancePrint("%s() LN%d, insert execution time is %10.2f ms\n",
+ __func__, __LINE__, delay/1000.0);
+ verbosePrint("[%d] %s() LN%d affectedRows=%d\n",
pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
- lastPrintTime = currentPrintTime;
- }
+ __func__, __LINE__, affectedRows);
- if ((insert_interval) && flagSleep) {
- et = taosGetTimestampMs();
+ if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
+ if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
+ pThreadInfo->cntDelay++;
+ pThreadInfo->totalDelay += delay;
- if (insert_interval > (et - st) ) {
- uint64_t sleepTime = insert_interval - (et -st);
- performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n",
- __func__, __LINE__, sleepTime);
- taosMsleep(sleepTime); // ms
- sleepTimeTotal += insert_interval;
+ if (affectedRows < 0) {
+ errorPrint2("%s() LN%d, affected rows: %d\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_stmt_progressive;
+ }
+
+ pThreadInfo->totalAffectedRows += affectedRows;
+
+ int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows;
+ if (currentPercent > percentComplete ) {
+ printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent);
+ percentComplete = currentPercent;
+ }
+ int64_t currentPrintTime = taosGetTimestampMs();
+ if (currentPrintTime - lastPrintTime > 30*1000) {
+ printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows);
+ lastPrintTime = currentPrintTime;
}
+
+ if (i >= insertRows)
+ break;
+ } // insertRows
+
+ if ((g_args.verbose_print) &&
+ (tableSeq == pThreadInfo->ntables - 1) && (stbInfo)
+ && (0 == strncasecmp(
+ stbInfo->dataSource,
+ "sample", strlen("sample")))) {
+ verbosePrint("%s() LN%d samplePos=%"PRId64"\n",
+ __func__, __LINE__, pThreadInfo->samplePos);
}
- }
- if (percentComplete < 100)
+ } // tableSeq
+
+ if (percentComplete < 100) {
printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+ }
-free_of_interlace:
+free_of_stmt_progressive:
tmfree(pThreadInfo->buffer);
printStatPerThread(pThreadInfo);
return NULL;
}
-
// sync insertion progressive data
static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
@@ -6987,13 +9321,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
int64_t timeStampStep =
stbInfo?stbInfo->timeStampStep:g_args.timestamp_step;
int64_t insertRows =
- (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT;
+ (stbInfo)?stbInfo->insertRows:g_args.insertRows;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n",
__func__, __LINE__, insertRows);
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
@@ -7006,17 +9340,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0;
- bool sourceRand;
- if (stbInfo) {
- if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) {
- sourceRand = true;
- } else {
- sourceRand = false; // from sample data file
- }
- } else {
- sourceRand = true;
- }
-
pThreadInfo->samplePos = 0;
int percentComplete = 0;
@@ -7034,7 +9357,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__,
pThreadInfo->threadID, tableSeq, tableName);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -7049,31 +9372,21 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
+ // measure prepare + insert
+ startTs = taosGetTimestampUs();
+
int32_t generated;
if (stbInfo) {
if (stbInfo->iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
- if (sourceRand) {
- generated = prepareStbStmtRand(
- pThreadInfo,
- tableName,
- tableSeq,
- g_args.num_of_RPR,
- insertRows,
- i, start_time
- );
- } else {
- generated = prepareStbStmtWithSample(
- pThreadInfo,
- tableName,
- tableSeq,
- g_args.num_of_RPR,
- insertRows, i, start_time,
- &(pThreadInfo->samplePos));
- }
-#else
- generated = -1;
-#endif
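+                // cap rows per request at the table's total insert rows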
+ generated = prepareStbStmt(
+ pThreadInfo,
+ tableName,
+ tableSeq,
+ (g_args.reqPerReq>stbInfo->insertRows)?
+ stbInfo->insertRows:
+ g_args.reqPerReq,
+ insertRows, i, start_time,
+ &(pThreadInfo->samplePos));
} else {
generated = generateStbProgressiveData(
stbInfo,
@@ -7085,16 +9398,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
}
} else {
if (g_args.iface == STMT_IFACE) {
-#if STMT_IFACE_ENABLED == 1
generated = prepareStmtWithoutStb(
pThreadInfo,
tableName,
- g_args.num_of_RPR,
+ g_args.reqPerReq,
insertRows, i,
start_time);
-#else
- generated = -1;
-#endif
} else {
generated = generateProgressiveDataWithoutStb(
tableName,
@@ -7105,6 +9414,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
&remainderBufLen);
}
}
+
+ verbosePrint("[%d] %s() LN%d generated=%d\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, generated);
+
if (generated > 0)
i += generated;
else
@@ -7113,7 +9427,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
- startTs = taosGetTimestampUs();
+ // only measure insert
+ // startTs = taosGetTimestampUs();
int32_t affectedRows = execInsert(pThreadInfo, generated);
@@ -7131,7 +9446,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (affectedRows < 0) {
- errorPrint("%s() LN%d, affected rows: %d\n",
+ errorPrint2("%s() LN%d, affected rows: %d\n",
__func__, __LINE__, affectedRows);
goto free_of_progressive;
}
@@ -7154,7 +9469,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (i >= insertRows)
break;
- } // num_of_DPT
+ } // insertRows
if ((g_args.verbose_print) &&
(tableSeq == pThreadInfo->ntables - 1) && (stbInfo)
@@ -7165,8 +9480,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__, pThreadInfo->samplePos);
}
} // tableSeq
- if (percentComplete < 100)
+
+ if (percentComplete < 100) {
printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete);
+ }
free_of_progressive:
tmfree(pThreadInfo->buffer);
@@ -7181,26 +9498,40 @@ static void* syncWrite(void *sarg) {
setThreadName("syncWrite");
- uint32_t interlaceRows;
+ uint32_t interlaceRows = 0;
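+    // interlace is honored only when the configured interlace rows are fewer than the rows to insert per table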
if (stbInfo) {
- if ((stbInfo->interlaceRows == 0)
- && (g_args.interlace_rows > 0)) {
- interlaceRows = g_args.interlace_rows;
- } else {
+ if (stbInfo->interlaceRows < stbInfo->insertRows)
interlaceRows = stbInfo->interlaceRows;
- }
} else {
- interlaceRows = g_args.interlace_rows;
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
}
if (interlaceRows > 0) {
// interlace mode
- return syncWriteInterlace(pThreadInfo);
+ if (stbInfo) {
+ if (STMT_IFACE == stbInfo->iface) {
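+                // choose the batch bind implementation when compiled in, else per-row stmt binding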
+#if STMT_BIND_PARAM_BATCH == 1
+ return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows);
+#else
+ return syncWriteInterlaceStmt(pThreadInfo, interlaceRows);
+#endif
+ } else {
+ return syncWriteInterlace(pThreadInfo, interlaceRows);
+ }
+ }
} else {
- // progressive mode
- return syncWriteProgressive(pThreadInfo);
+ // progressive mode
+ if (((stbInfo) && (STMT_IFACE == stbInfo->iface))
+ || (STMT_IFACE == g_args.iface)) {
+ return syncWriteProgressiveStmt(pThreadInfo);
+ } else {
+ return syncWriteProgressive(pThreadInfo);
+ }
}
+
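+    // NOTE: interlace mode without a super table falls through here without writing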
+ return NULL;
}
static void callBack(void *param, TAOS_RES *res, int code) {
@@ -7219,11 +9550,11 @@ static void callBack(void *param, TAOS_RES *res, int code) {
char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen);
char data[MAX_DATA_SIZE];
char *pstr = buffer;
- pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values",
+ pstr += sprintf(pstr, "INSERT INTO %s.%s%"PRId64" VALUES",
pThreadInfo->db_name, pThreadInfo->tb_prefix,
pThreadInfo->start_table_from);
// if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) {
- if (pThreadInfo->counter >= g_args.num_of_RPR) {
+ if (pThreadInfo->counter >= g_args.reqPerReq) {
pThreadInfo->start_table_from++;
pThreadInfo->counter = 0;
}
@@ -7234,7 +9565,7 @@ static void callBack(void *param, TAOS_RES *res, int code) {
return;
}
- for (int i = 0; i < g_args.num_of_RPR; i++) {
+ for (int i = 0; i < g_args.reqPerReq; i++) {
int rand_num = taosRandom() % 100;
if (0 != pThreadInfo->stbInfo->disorderRatio
&& rand_num < pThreadInfo->stbInfo->disorderRatio) {
@@ -7293,7 +9624,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
uint16_t rest_port = port + TSDB_PORT_HTTP;
struct hostent *server = gethostbyname(host);
if ((server == NULL) || (server->h_addr == NULL)) {
- errorPrint("%s", "ERROR, no such host");
+ errorPrint2("%s", "no such host");
return -1;
}
@@ -7314,79 +9645,6 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
return 0;
}
-#if STMT_IFACE_ENABLED == 1
-static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec)
-{
- stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE);
- if (stbInfo->sampleBindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
- __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE);
- return -1;
- }
-
-
- for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) {
- char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
- if (bindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
- __func__, __LINE__, (stbInfo->columnCount + 1));
- return -1;
- }
-
-
- TAOS_BIND *bind;
- int cursor = 0;
-
- for (int c = 0; c < stbInfo->columnCount + 1; c++) {
- bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c));
-
- if (c == 0) {
- bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
- bind->buffer_length = sizeof(int64_t);
- bind->buffer = NULL; //bind_ts;
- bind->length = &bind->buffer_length;
- bind->is_null = NULL;
- } else {
- char *restStr = stbInfo->sampleDataBuf
- + stbInfo->lenOfOneRow * i + cursor;
- int lengthOfRest = strlen(restStr);
-
- int index = 0;
- for (index = 0; index < lengthOfRest; index ++) {
- if (restStr[index] == ',') {
- break;
- }
- }
-
- char *bindBuffer = calloc(1, index + 1);
- if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
- __func__, __LINE__, DOUBLE_BUFF_LEN);
- return -1;
- }
-
- strncpy(bindBuffer, restStr, index);
- cursor += index + 1; // skip ',' too
-
- if (-1 == prepareStmtBindArrayByType(
- bind,
- stbInfo->columns[c-1].dataType,
- stbInfo->columns[c-1].dataLen,
- timePrec,
- bindBuffer)) {
- free(bindBuffer);
- return -1;
- }
- free(bindBuffer);
- }
- }
- *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray;
- }
-
- return 0;
-}
-#endif
-
static void startMultiThreadInsertData(int threads, char* db_name,
char* precision, SSuperTable* stbInfo) {
@@ -7396,52 +9654,52 @@ static void startMultiThreadInsertData(int threads, char* db_name,
timePrec = TSDB_TIME_PRECISION_MILLI;
} else if (0 == strncasecmp(precision, "us", 2)) {
timePrec = TSDB_TIME_PRECISION_MICRO;
-#if NANO_SECOND_ENABLED == 1
} else if (0 == strncasecmp(precision, "ns", 2)) {
timePrec = TSDB_TIME_PRECISION_NANO;
-#endif
} else {
- errorPrint("Not support precision: %s\n", precision);
+ errorPrint2("Not support precision: %s\n", precision);
exit(EXIT_FAILURE);
}
}
- int64_t start_time;
+ int64_t startTime;
if (stbInfo) {
if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) {
- start_time = taosGetTimestamp(timePrec);
+ startTime = taosGetTimestamp(timePrec);
} else {
if (TSDB_CODE_SUCCESS != taosParseTime(
stbInfo->startTimestamp,
- &start_time,
+ &startTime,
strlen(stbInfo->startTimestamp),
timePrec, 0)) {
ERROR_EXIT("failed to parse time!\n");
}
}
} else {
- start_time = 1500000000000;
+ startTime = DEFAULT_START_TIME;
}
- debugPrint("%s() LN%d, start_time= %"PRId64"\n",
- __func__, __LINE__, start_time);
-
- int64_t start = taosGetTimestampMs();
+ debugPrint("%s() LN%d, startTime= %"PRId64"\n",
+ __func__, __LINE__, startTime);
// read sample data from file first
- if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource,
- "sample", strlen("sample")))) {
- if (0 != prepareSampleDataForSTable(stbInfo)) {
- errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
- __func__, __LINE__);
- exit(EXIT_FAILURE);
- }
+ int ret;
+ if (stbInfo) {
+ ret = prepareSampleForStb(stbInfo);
+ } else {
+ ret = prepareSampleForNtb();
+ }
+
+ if (0 != ret) {
+ errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
+ __func__, __LINE__);
+ exit(EXIT_FAILURE);
}
TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == taos0) {
- errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
+ errorPrint2("%s() LN%d, connect to server fail , reason: %s\n",
__func__, __LINE__, taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -7465,6 +9723,12 @@ static void startMultiThreadInsertData(int threads, char* db_name,
|| ((stbInfo->childTblOffset
+ stbInfo->childTblLimit)
> (stbInfo->childTblCount))) {
+
+ if (stbInfo->childTblCount < stbInfo->childTblOffset) {
+ printf("WARNING: offset will not be used since the child tables count is less then offset!\n");
+
+ stbInfo->childTblOffset = 0;
+ }
stbInfo->childTblLimit =
stbInfo->childTblCount - stbInfo->childTblOffset;
}
@@ -7496,19 +9760,20 @@ static void startMultiThreadInsertData(int threads, char* db_name,
limit * TSDB_TABLE_NAME_LEN);
if (stbInfo->childTblName == NULL) {
taos_close(taos0);
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
int64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos0,
- db_name, stbInfo->sTblName,
+ db_name, stbInfo->stbName,
&stbInfo->childTblName, &childTblCount,
limit,
offset);
+        ntables = childTblCount; // use the actual child table count within limit/offset
} else {
- ntables = g_args.num_of_tables;
+ ntables = g_args.ntables;
tableFrom = 0;
}
@@ -7534,17 +9799,34 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- assert(pids != NULL);
-
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ assert(pids != NULL);
assert(infos != NULL);
- memset(pids, 0, threads * sizeof(pthread_t));
- memset(infos, 0, threads * sizeof(threadInfo));
-
-#if STMT_IFACE_ENABLED == 1
char *stmtBuffer = calloc(1, BUFFER_SIZE);
assert(stmtBuffer);
+
+#if STMT_BIND_PARAM_BATCH == 1
+ uint32_t interlaceRows = 0;
+ uint32_t batch;
+
+ if (stbInfo) {
+ if (stbInfo->interlaceRows < stbInfo->insertRows)
+ interlaceRows = stbInfo->interlaceRows;
+ } else {
+ if (g_args.interlaceRows < g_args.insertRows)
+ interlaceRows = g_args.interlaceRows;
+ }
+
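+    // bind-param batch size: the interlace row count when interlacing, otherwise rows-per-request capped by the total rows to insert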
+ if (interlaceRows > 0) {
+ batch = interlaceRows;
+ } else {
+ batch = (g_args.reqPerReq>g_args.insertRows)?
+ g_args.insertRows:g_args.reqPerReq;
+ }
+
+#endif
+
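     // build the INSERT statement template with ? placeholders for taos_stmt_prepare()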
if ((g_args.iface == STMT_IFACE)
|| ((stbInfo)
&& (stbInfo->iface == STMT_IFACE))) {
@@ -7554,7 +9836,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&& (AUTO_CREATE_SUBTBL
== stbInfo->autoCreateTable)) {
pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?",
- stbInfo->sTblName);
+ stbInfo->stbName);
for (int tag = 0; tag < (stbInfo->tagCount - 1);
tag ++ ) {
pstr += sprintf(pstr, ",?");
@@ -7564,12 +9846,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
}
- int columnCount;
- if (stbInfo) {
- columnCount = stbInfo->columnCount;
- } else {
- columnCount = g_args.num_of_CPR;
- }
+ int columnCount = (stbInfo)?
+ stbInfo->columnCount:
+ g_args.columnCount;
for (int col = 0; col < columnCount; col ++) {
pstr += sprintf(pstr, ",?");
@@ -7577,13 +9856,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pstr += sprintf(pstr, ")");
debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer);
-
- if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource,
- "sample", strlen("sample")))) {
- parseSampleFileToStmt(stbInfo, timePrec);
- }
- }
+#if STMT_BIND_PARAM_BATCH == 1
+ parseSamplefileToStmtBatch(stbInfo);
#endif
+ }
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
@@ -7593,7 +9869,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pThreadInfo->time_precision = timePrec;
pThreadInfo->stbInfo = stbInfo;
- pThreadInfo->start_time = start_time;
+ pThreadInfo->start_time = startTime;
pThreadInfo->minDelay = UINT64_MAX;
if ((NULL == stbInfo) ||
@@ -7604,42 +9880,54 @@ static void startMultiThreadInsertData(int threads, char* db_name,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == pThreadInfo->taos) {
free(infos);
- errorPrint(
+ errorPrint2(
"%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
-#if STMT_IFACE_ENABLED == 1
if ((g_args.iface == STMT_IFACE)
|| ((stbInfo)
&& (stbInfo->iface == STMT_IFACE))) {
-
pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
if (NULL == pThreadInfo->stmt) {
free(pids);
free(infos);
- errorPrint(
+ errorPrint2(
"%s() LN%d, failed init stmt, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
- int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0);
- if (ret != 0){
+            ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0);
+            if (0 != ret) {
free(pids);
free(infos);
free(stmtBuffer);
- errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
+ errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
ret, taos_stmt_errstr(pThreadInfo->stmt));
exit(EXIT_FAILURE);
}
pThreadInfo->bind_ts = malloc(sizeof(int64_t));
- }
+
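+            // pre-parse sample data into this thread's stmt bind structures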
+ if (stbInfo) {
+#if STMT_BIND_PARAM_BATCH == 1
+ parseStbSampleToStmtBatchForThread(
+ pThreadInfo, stbInfo, timePrec, batch);
+#else
+ parseStbSampleToStmt(pThreadInfo, stbInfo, timePrec);
+#endif
+ } else {
+#if STMT_BIND_PARAM_BATCH == 1
+ parseNtbSampleToStmtBatchForThread(
+ pThreadInfo, timePrec, batch);
+#else
+ parseNtbSampleToStmt(pThreadInfo, timePrec);
#endif
+ }
+ }
} else {
pThreadInfo->taos = NULL;
}
@@ -7665,9 +9953,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
-#if STMT_IFACE_ENABLED == 1
free(stmtBuffer);
-#endif
+
+ int64_t start = taosGetTimestampUs();
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
@@ -7682,14 +9970,37 @@ static void startMultiThreadInsertData(int threads, char* db_name,
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
-#if STMT_IFACE_ENABLED == 1
+ tsem_destroy(&(pThreadInfo->lock_sem));
+ taos_close(pThreadInfo->taos);
+
if (pThreadInfo->stmt) {
taos_stmt_close(pThreadInfo->stmt);
- tmfree((char *)pThreadInfo->bind_ts);
+ }
+
+ tmfree((char *)pThreadInfo->bind_ts);
+#if STMT_BIND_PARAM_BATCH == 1
+ tmfree((char *)pThreadInfo->bind_ts_array);
+ tmfree(pThreadInfo->bindParams);
+ tmfree(pThreadInfo->is_null);
+#else
+ if (pThreadInfo->sampleBindArray) {
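+            // each slot holds a heap pointer to a TAOS_BIND array; free every column buffer, then the array itself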
+ for (int k = 0; k < MAX_SAMPLES; k++) {
+ uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)(
+ pThreadInfo->sampleBindArray
+ + sizeof(uintptr_t *) * k));
+ int columnCount = (pThreadInfo->stbInfo)?
+ pThreadInfo->stbInfo->columnCount:
+ g_args.columnCount;
+ for (int c = 1; c < columnCount + 1; c++) {
+ TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c));
+ if (bind)
+ tmfree(bind->buffer);
+ }
+ tmfree((char *)tmp);
+ }
+ tmfree(pThreadInfo->sampleBindArray);
}
#endif
- tsem_destroy(&(pThreadInfo->lock_sem));
- taos_close(pThreadInfo->taos);
debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
@@ -7708,48 +10019,44 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
}
- cntDelay -= 1;
if (cntDelay == 0) cntDelay = 1;
avgDelay = (double)totalDelay / cntDelay;
- int64_t end = taosGetTimestampMs();
+ int64_t end = taosGetTimestampUs();
int64_t t = end - start;
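+    // avoid division by zero when the elapsed time rounds to zero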
+ if (0 == t) t = 1;
- double tInMs = t/1000.0;
+ double tInMs = (double) t / 1000000.0;
if (stbInfo) {
- fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
tInMs, stbInfo->totalInsertRows,
stbInfo->totalAffectedRows,
- threads, db_name, stbInfo->sTblName,
- (tInMs)?
- (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX);
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
tInMs, stbInfo->totalInsertRows,
stbInfo->totalAffectedRows,
- threads, db_name, stbInfo->sTblName,
- (tInMs)?
- (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX);
+ threads, db_name, stbInfo->stbName,
+ (double)(stbInfo->totalInsertRows/tInMs));
}
} else {
- fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
tInMs, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
- (tInMs)?
- (double)(g_args.totalInsertRows/tInMs):FLT_MAX);
+ (double)(g_args.totalInsertRows/tInMs));
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
+ "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
tInMs, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
- (tInMs)?
- (double)(g_args.totalInsertRows/tInMs):FLT_MAX);
+ (double)(g_args.totalInsertRows/tInMs));
}
}
@@ -7770,37 +10077,46 @@ static void startMultiThreadInsertData(int threads, char* db_name,
free(infos);
}
-static void *readTable(void *sarg) {
-#if 1
+static void *queryNtableAggrFunc(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
- setThreadName("readTable");
+ setThreadName("queryNtableAggrFunc");
char *command = calloc(1, BUFFER_SIZE);
assert(command);
- uint64_t sTime = pThreadInfo->start_time;
+ uint64_t startTime = pThreadInfo->start_time;
char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
- errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
free(command);
return NULL;
}
- int64_t num_of_DPT;
+ int64_t insertRows;
/* if (pThreadInfo->stbInfo) {
- num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table;
+ insertRows = pThreadInfo->stbInfo->insertRows; // nrecords_per_table;
} else {
*/
- num_of_DPT = g_args.num_of_DPT;
+ insertRows = g_args.insertRows;
// }
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
+ bool aggr_func = g_Dbs.aggr_func;
+
+ char **aggreFunc;
+ int n;
+
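+    // demo mode uses the demo aggregate list; without aggregation support only the first two entries are queried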
+ if (g_args.demo_mode) {
+ aggreFunc = g_aggreFuncDemo;
+ n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
+ } else {
+ aggreFunc = g_aggreFunc;
+ n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ }
- int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
+ if (!aggr_func) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
}
printf("%"PRId64" records:\n", totalData);
@@ -7809,16 +10125,18 @@ static void *readTable(void *sarg) {
for (int j = 0; j < n; j++) {
double totalT = 0;
uint64_t count = 0;
- for (int64_t i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
- g_aggreFunc[j], tb_prefix, i, sTime);
+ for (int64_t i = 0; i < ntables; i++) {
+ sprintf(command, "SELECT %s FROM %s%"PRId64" WHERE ts>= %" PRIu64,
+ aggreFunc[j], tb_prefix, i, startTime);
- double t = taosGetTimestampMs();
+ double t = taosGetTimestampUs();
+ debugPrint("%s() LN%d, sql command: %s\n",
+ __func__, __LINE__, command);
TAOS_RES *pSql = taos_query(taos, command);
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
@@ -7830,29 +10148,27 @@ static void *readTable(void *sarg) {
count++;
}
- t = taosGetTimestampMs() - t;
+ t = taosGetTimestampUs() - t;
totalT += t;
taos_free_result(pSql);
}
fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
- g_aggreFunc[j][0] == '*' ? " * " : g_aggreFunc[j], totalData,
- (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
- printf("select %10s took %.6f second(s)\n", g_aggreFunc[j], totalT * 1000);
+ aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
+                (double)(ntables * insertRows) * 1000000.0 / totalT, totalT / 1000000);
+ printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT / 1000000);
}
fprintf(fp, "\n");
fclose(fp);
free(command);
-#endif
return NULL;
}
-static void *readMetric(void *sarg) {
-#if 1
+static void *queryStableAggrFunc(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
- setThreadName("readMetric");
+ setThreadName("queryStableAggrFunc");
char *command = calloc(1, BUFFER_SIZE);
assert(command);
@@ -7863,15 +10179,26 @@ static void *readMetric(void *sarg) {
return NULL;
}
- int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows;
- int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
- int64_t totalData = num_of_DPT * num_of_tables;
- bool do_aggreFunc = g_Dbs.do_aggreFunc;
+ int64_t insertRows = pThreadInfo->stbInfo->insertRows;
+ int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
+ int64_t totalData = insertRows * ntables;
+ bool aggr_func = g_Dbs.aggr_func;
- int n = do_aggreFunc ? (sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
- if (!do_aggreFunc) {
+ char **aggreFunc;
+ int n;
+
+ if (g_args.demo_mode) {
+ aggreFunc = g_aggreFuncDemo;
+ n = aggr_func?(sizeof(g_aggreFuncDemo) / sizeof(g_aggreFuncDemo[0])) : 2;
+ } else {
+ aggreFunc = g_aggreFunc;
+ n = aggr_func?(sizeof(g_aggreFunc) / sizeof(g_aggreFunc[0])) : 2;
+ }
+
+ if (!aggr_func) {
printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n");
}
+
printf("%"PRId64" records:\n", totalData);
fprintf(fp, "Querying On %"PRId64" records:\n", totalData);
@@ -7879,28 +10206,39 @@ static void *readMetric(void *sarg) {
char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
- int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
+ int64_t m = 10 < ntables ? 10 : ntables;
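+        // query at most the first 10 tables, OR-ing one tag predicate per table into the WHERE clause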
for (int64_t i = 1; i <= m; i++) {
if (i == 1) {
- sprintf(tempS, "t1 = %"PRId64"", i);
+ if (g_args.demo_mode) {
+ sprintf(tempS, "groupid = %"PRId64"", i);
+ } else {
+ sprintf(tempS, "t0 = %"PRId64"", i);
+ }
} else {
- sprintf(tempS, " or t1 = %"PRId64" ", i);
+ if (g_args.demo_mode) {
+ sprintf(tempS, " or groupid = %"PRId64" ", i);
+ } else {
+ sprintf(tempS, " or t0 = %"PRId64" ", i);
+ }
}
             strncat(condition, tempS, COND_BUF_LEN - strlen(condition) - 1);
- sprintf(command, "select %s from meters where %s", g_aggreFunc[j], condition);
+ sprintf(command, "SELECT %s FROM meters WHERE %s", aggreFunc[j], condition);
printf("Where condition: %s\n", condition);
+
+ debugPrint("%s() LN%d, sql command: %s\n",
+ __func__, __LINE__, command);
fprintf(fp, "%s\n", command);
- double t = taosGetTimestampMs();
+ double t = taosGetTimestampUs();
TAOS_RES *pSql = taos_query(taos, command);
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
@@ -7911,11 +10249,11 @@ static void *readMetric(void *sarg) {
while(taos_fetch_row(pSql) != NULL) {
count++;
}
- t = taosGetTimestampMs() - t;
+ t = taosGetTimestampUs() - t;
fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n",
- num_of_tables * num_of_DPT / (t * 1000.0), t);
- printf("select %10s took %.6f second(s)\n\n", g_aggreFunc[j], t * 1000.0);
+                ntables * insertRows / (t / 1000000.0), t / 1000.0);
+            printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t / 1000000);
taos_free_result(pSql);
}
@@ -7923,7 +10261,7 @@ static void *readMetric(void *sarg) {
}
fclose(fp);
free(command);
-#endif
+
return NULL;
}
@@ -7947,7 +10285,7 @@ static int insertTestProcess() {
debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile);
g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
if (NULL == g_fpOfInsertResult) {
- errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile);
+ errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile);
return -1;
}
@@ -7970,7 +10308,7 @@ static int insertTestProcess() {
}
free(cmdBuffer);
- // pretreatement
+ // pretreatment
if (prepareSampleData() != 0) {
if (g_fpOfInsertResult)
fclose(g_fpOfInsertResult);
@@ -7980,18 +10318,30 @@ static int insertTestProcess() {
double start;
double end;
- // create child tables
- start = taosGetTimestampMs();
- createChildTables();
- end = taosGetTimestampMs();
-
if (g_totalChildTables > 0) {
- fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ fprintf(stderr,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountForCreateTbl);
+ }
+
+ // create child tables
+ start = taosGetTimestampMs();
+ createChildTables();
+ end = taosGetTimestampMs();
+
+ fprintf(stderr,
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ "\nSpent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountForCreateTbl, g_actualChildTables);
}
}
@@ -8049,7 +10399,7 @@ static void *specifiedTableQuery(void *sarg) {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
@@ -8061,7 +10411,7 @@ static void *specifiedTableQuery(void *sarg) {
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint("use database %s failed!\n\n",
g_queryInfo.dbName);
return NULL;
}
@@ -8102,7 +10452,7 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t currentPrintTime = taosGetTimestampMs();
uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n",
__func__, __LINE__, endTs, startTs);
printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
@@ -8227,7 +10577,7 @@ static int queryTestProcess() {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -8235,7 +10585,7 @@ static int queryTestProcess() {
if (0 != g_queryInfo.superQueryInfo.sqlCount) {
getAllChildNameOfSuperTable(taos,
g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
+ g_queryInfo.superQueryInfo.stbName,
&g_queryInfo.superQueryInfo.childTblName,
&g_queryInfo.superQueryInfo.childTblCount);
}
@@ -8285,13 +10635,13 @@ static int queryTestProcess() {
taos_close(taos);
free(infos);
free(pids);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
return -1;
}
}
- pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
+    pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedTableQuery,
pThreadInfo);
@@ -8341,7 +10691,7 @@ static int queryTestProcess() {
             pThreadInfo->ntables = i < b ? a + 1 : a;
             pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
}
@@ -8368,7 +10718,7 @@ static int queryTestProcess() {
tmfree((char*)pidsOfSub);
tmfree((char*)infosOfSub);
- // taos_close(taos);// TODO: workaround to use separate taos connection;
+ // taos_close(taos);// workaround to use separate taos connection;
uint64_t endTs = taosGetTimestampMs();
uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
@@ -8383,27 +10733,27 @@ static int queryTestProcess() {
static void stable_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
if (param)
fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+    // taos_unsubscribe() will free result.
}
static void specified_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
if (param)
fetchResult(res, (threadInfo *)param);
- // tao_unscribe() will free result.
+    // taos_unsubscribe() will free result.
}
static TAOS_SUB* subscribeImpl(
@@ -8435,7 +10785,7 @@ static TAOS_SUB* subscribeImpl(
}
if (tsub == NULL) {
- errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
return NULL;
}
@@ -8466,7 +10816,7 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
free(subSqlStr);
return NULL;
@@ -8477,7 +10827,7 @@ static void *superSubscribe(void *sarg) {
sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
free(subSqlStr);
return NULL;
@@ -8613,7 +10963,7 @@ static void *specifiedSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
}
@@ -8720,7 +11070,7 @@ static int subscribeTestProcess() {
g_queryInfo.dbName,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -8728,12 +11078,12 @@ static int subscribeTestProcess() {
if (0 != g_queryInfo.superQueryInfo.sqlCount) {
getAllChildNameOfSuperTable(taos,
g_queryInfo.dbName,
- g_queryInfo.superQueryInfo.sTblName,
+ g_queryInfo.superQueryInfo.stbName,
&g_queryInfo.superQueryInfo.childTblName,
&g_queryInfo.superQueryInfo.childTblCount);
}
- taos_close(taos); // TODO: workaround to use separate taos connection;
+ taos_close(taos); // workaround to use separate taos connection;
pthread_t *pids = NULL;
threadInfo *infos = NULL;
@@ -8743,12 +11093,12 @@ static int subscribeTestProcess() {
//==== create threads for query for specified table
if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) {
- debugPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ debugPrint("%s() LN%d, specified query sqlCount %d.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount);
} else {
if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ errorPrint2("%s() LN%d, specified query sqlCount %d.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount);
exit(EXIT_FAILURE);
@@ -8765,7 +11115,7 @@ static int subscribeTestProcess() {
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -8775,7 +11125,7 @@ static int subscribeTestProcess() {
threadInfo *pThreadInfo = infos + seq;
pThreadInfo->threadID = seq;
pThreadInfo->querySeq = i;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
}
}
@@ -8800,7 +11150,7 @@ static int subscribeTestProcess() {
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n",
+ errorPrint2("%s() LN%d, malloc failed for create threads\n",
__func__, __LINE__);
// taos_close(taos);
exit(EXIT_FAILURE);
@@ -8832,7 +11182,7 @@ static int subscribeTestProcess() {
             pThreadInfo->ntables = j < b ? a + 1 : a;
             pThreadInfo->end_table_to = j < b ? tableFrom + a : tableFrom + a - 1;
             tableFrom = pThreadInfo->end_table_to + 1;
- pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+ pThreadInfo->taos = NULL; // workaround to use separate taos connection;
pthread_create(pidsOfStable + seq,
NULL, superSubscribe, pThreadInfo);
}
@@ -8872,7 +11222,7 @@ static void initOfInsertMeta() {
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
g_Dbs.port = 6030;
tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);
g_Dbs.threadCount = 2;
g_Dbs.use_metric = g_args.use_metric;
@@ -8885,7 +11235,7 @@ static void initOfQueryMeta() {
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
g_queryInfo.port = 6030;
tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE);
- tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
+ tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, SHELL_MAX_PASSWORD_LEN);
}
static void setParaFromArg() {
@@ -8899,14 +11249,14 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE);
}
- tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE);
+ tstrncpy(g_Dbs.password, g_args.password, SHELL_MAX_PASSWORD_LEN);
if (g_args.port) {
g_Dbs.port = g_args.port;
}
- g_Dbs.threadCount = g_args.num_of_threads;
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = true;
@@ -8918,27 +11268,27 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
g_Dbs.use_metric = g_args.use_metric;
- g_Dbs.insert_only = g_args.insert_only;
- g_Dbs.do_aggreFunc = true;
+ g_Dbs.aggr_func = g_args.aggr_func;
char dataString[TSDB_MAX_BYTES_PER_ROW];
- char **data_type = g_args.datatype;
+ char *data_type = g_args.data_type;
+ char **dataType = g_args.dataType;
memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW);
- if (strcasecmp(data_type[0], "BINARY") == 0
- || strcasecmp(data_type[0], "BOOL") == 0
- || strcasecmp(data_type[0], "NCHAR") == 0 ) {
- g_Dbs.do_aggreFunc = false;
+ if ((data_type[0] == TSDB_DATA_TYPE_BINARY)
+ || (data_type[0] == TSDB_DATA_TYPE_BOOL)
+ || (data_type[0] == TSDB_DATA_TYPE_NCHAR)) {
+ g_Dbs.aggr_func = false;
}
if (g_args.use_metric) {
g_Dbs.db[0].superTblCount = 1;
- tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
- g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
- g_Dbs.threadCount = g_args.num_of_threads;
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ tstrncpy(g_Dbs.db[0].superTbls[0].stbName, "meters", TSDB_TABLE_NAME_LEN);
+ g_Dbs.db[0].superTbls[0].childTblCount = g_args.ntables;
+ g_Dbs.threadCount = g_args.nthreads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.asyncMode = g_args.async_mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;
@@ -8958,26 +11308,28 @@ static void setParaFromArg() {
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step;
- g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT;
+ g_Dbs.db[0].superTbls[0].insertRows = g_args.insertRows;
g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len;
g_Dbs.db[0].superTbls[0].columnCount = 0;
for (int i = 0; i < MAX_NUM_COLUMNS; i++) {
- if (data_type[i] == NULL) {
+ if (data_type[i] == TSDB_DATA_TYPE_NULL) {
break;
}
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = data_type[i];
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
- data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1));
- g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
+ dataType[i], min(DATATYPE_BUFF_LEN, strlen(dataType[i]) + 1));
+ g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.binwidth;
g_Dbs.db[0].superTbls[0].columnCount++;
}
- if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
- g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
+ if (g_Dbs.db[0].superTbls[0].columnCount > g_args.columnCount) {
+ g_Dbs.db[0].superTbls[0].columnCount = g_args.columnCount;
} else {
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
- i < g_args.num_of_CPR; i++) {
+ i < g_args.columnCount; i++) {
+ g_Dbs.db[0].superTbls[0].columns[i].data_type = TSDB_DATA_TYPE_INT;
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
"INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1));
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
@@ -8991,10 +11343,10 @@ static void setParaFromArg() {
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
"BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1));
- g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
+ g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.binwidth;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
- g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.threadCountForCreateTbl = g_args.nthreads;
g_Dbs.db[0].superTbls[0].tagCount = 0;
}
}
@@ -9066,7 +11418,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
memcpy(cmd + cmd_len, line, read_len);
if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
- errorPrint("%s() LN%d, queryDbExec %s failed!\n",
+ errorPrint2("%s() LN%d, queryDbExec %s failed!\n",
__func__, __LINE__, cmd);
tmfree(cmd);
tmfree(line);
@@ -9110,16 +11462,15 @@ static void testMetaFile() {
}
}
-static void queryResult() {
+static void queryAggrFunc() {
// query data
pthread_t read_id;
threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
assert(pThreadInfo);
- pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
+ pThreadInfo->start_time = DEFAULT_START_TIME; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;
- //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
if (g_args.use_metric) {
pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
@@ -9127,8 +11478,8 @@ static void queryResult() {
tstrncpy(pThreadInfo->tb_prefix,
g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN);
} else {
- pThreadInfo->ntables = g_args.num_of_tables;
- pThreadInfo->end_table_to = g_args.num_of_tables -1;
+ pThreadInfo->ntables = g_args.ntables;
+ pThreadInfo->end_table_to = g_args.ntables -1;
tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
}
@@ -9140,7 +11491,7 @@ static void queryResult() {
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
free(pThreadInfo);
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -9148,9 +11499,9 @@ static void queryResult() {
tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
if (!g_Dbs.use_metric) {
- pthread_create(&read_id, NULL, readTable, pThreadInfo);
+ pthread_create(&read_id, NULL, queryNtableAggrFunc, pThreadInfo);
} else {
- pthread_create(&read_id, NULL, readMetric, pThreadInfo);
+ pthread_create(&read_id, NULL, queryStableAggrFunc, pThreadInfo);
}
pthread_join(read_id, NULL);
taos_close(pThreadInfo->taos);
@@ -9162,7 +11513,7 @@ static void testCmdLine() {
if (strlen(configDir)) {
wordexp_t full_path;
if (wordexp(configDir, &full_path, 0) != 0) {
- errorPrint( "Invalid path %s\n", configDir);
+ errorPrint("Invalid path %s\n", configDir);
return;
}
taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
@@ -9172,8 +11523,9 @@ static void testCmdLine() {
g_args.test_mode = INSERT_TEST;
insertTestProcess();
- if (false == g_Dbs.insert_only)
- queryResult();
+ if (g_Dbs.aggr_func) {
+ queryAggrFunc();
+ }
}
int main(int argc, char *argv[]) {
@@ -9182,6 +11534,7 @@ int main(int argc, char *argv[]) {
debugPrint("meta file: %s\n", g_args.metaFile);
if (g_args.metaFile) {
+ g_totalChildTables = 0;
initOfInsertMeta();
initOfQueryMeta();
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 51f4748eab462c8e883e83cd5923f38dd7fb9b5a..c3c914e96fc096f59aa701d3496455c754356aa8 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -9,9 +9,9 @@ AUX_SOURCE_DIRECTORY(. SRC)
IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdump taos_static)
+ TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
ELSE ()
- TARGET_LINK_LIBRARIES(taosdump taos)
+ TARGET_LINK_LIBRARIES(taosdump taos cJson)
ENDIF ()
ENDIF ()
@@ -19,8 +19,8 @@ IF (TD_DARWIN)
# missing for macosx
# ADD_EXECUTABLE(taosdump ${SRC})
# IF (TD_SOMODE_STATIC)
- # TARGET_LINK_LIBRARIES(taosdump taos_static)
+ # TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
# ELSE ()
- # TARGET_LINK_LIBRARIES(taosdump taos)
+ # TARGET_LINK_LIBRARIES(taosdump taos cJson)
# ENDIF ()
ENDIF ()
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index bea6e6510683e3a400623c2ef843d0ce6e34521c..2f5bf8f6d52bb9e4cc8fabdcbaa727b014f88e64 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -60,8 +60,29 @@ typedef struct {
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
- do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0)
+ do { fprintf(stderr, "\033[31m"); \
+ fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); } while(0)
+#define okPrint(fmt, ...) \
+ do { fprintf(stderr, "\033[32m"); \
+ fprintf(stderr, "OK: "fmt, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); } while(0)
+
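+// return true when the input consists solely of decimal digits; used to validate numeric CLI arguments before atoi()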
+static bool isStringNumber(char *input)
+{
+ int len = strlen(input);
+ if (0 == len) {
+ return false;
+ }
+
+ for (int i = 0; i < len; i++) {
+ if (!isdigit(input[i]))
+ return false;
+ }
+
+ return true;
+}
// -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index {
@@ -167,6 +188,7 @@ typedef struct {
int32_t threadIndex;
int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN];
+ int precision;
void *taosCon;
int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut;
@@ -195,7 +217,7 @@ static char doc[] = "";
/* to force a line-break, e.g.\n<-- here."; */
/* A description of the arguments we accept. */
-static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i inpath\n-o outpath";
+static char args_doc[] = "dbname [tbname ...]\n--databases db1,db2,... \n--all-databases\n-i inpath\n-o outpath";
/* Keys for options without short-options. */
#define OPT_ABORT 1 /* –abort */
@@ -206,45 +228,37 @@ static struct argp_option options[] = {
{"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0},
{"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0},
#ifdef _TD_POWER_
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0},
+ {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0},
#else
- {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0},
+ {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0},
#endif
{"port", 'P', "PORT", 0, "Port to connect", 0},
- {"cversion", 'v', "CVERION", 0, "client version", 0},
{"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
// input/output file
{"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
{"inpath", 'i', "INPATH", 0, "Input file path.", 1},
{"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
#ifdef _TD_POWER_
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
+ {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
#else
- {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
+ {"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
#endif
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
// dump unit options
{"all-databases", 'A', 0, 0, "Dump all databases.", 2},
- {"databases", 'D', 0, 0, "Dump assigned databases", 2},
+ {"databases", 'D', "DATABASES", 0, "Dump inputed databases. Use comma to seprate databases\' name.", 2},
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2},
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
- {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
+ {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
-#if TSDB_SUPPORT_NANOSECOND == 1
- {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6},
-#else
- {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6},
-#endif
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"debug", 'g', 0, 0, "Print debug info.", 8},
- {"verbose", 'b', 0, 0, "Print verbose debug info.", 9},
- {"performanceprint", 'm', 0, 0, "Print performance debug info.", 10},
{0}
};
@@ -253,9 +267,8 @@ typedef struct arguments {
// connection option
char *host;
char *user;
- char *password;
+ char password[SHELL_MAX_PASSWORD_LEN];
uint16_t port;
- char cversion[12];
uint16_t mysqlFlag;
// output file
char outpath[MAX_FILE_NAME_LEN];
@@ -266,13 +279,17 @@ typedef struct arguments {
// dump unit option
bool all_databases;
bool databases;
+ char *databasesSeq;
// dump format option
bool schemaonly;
bool with_property;
bool avro;
int64_t start_time;
+ char humanStartTime[28];
int64_t end_time;
+ char humanEndTime[28];
char precision[8];
+
int32_t data_batch;
int32_t max_sql_len;
int32_t table_batch; // num of table which will be dump into one output file.
@@ -286,6 +303,8 @@ typedef struct arguments {
bool debug_print;
bool verbose_print;
bool performance_print;
+
+ int dbCount;
} SArguments;
/* Our argp parser. */
@@ -308,13 +327,17 @@ static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols,
static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric,
int numOfCols, FILE *fp, char* dbName);
static int32_t taosDumpTable(char *tbName, char *metric,
- FILE *fp, TAOS* taosCon, char* dbName);
+ FILE *fp, TAOS* taosCon, char* dbName, int precision);
static int taosDumpTableData(FILE *fp, char *tbName,
TAOS* taosCon, char* dbName,
+ int precision,
char *jsonAvroSchema);
static int taosCheckParam(struct arguments *arguments);
static void taosFreeDbInfos();
-static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName);
+static void taosStartDumpOutWorkThreads(
+ int32_t numOfThread,
+ char *dbName,
+ int precision);
struct arguments g_args = {
// connection option
@@ -326,7 +349,6 @@ struct arguments g_args = {
"taosdata",
#endif
0,
- "",
0,
// outpath and inpath
"",
@@ -334,14 +356,17 @@ struct arguments g_args = {
"./dump_result.txt",
NULL,
// dump unit option
- false,
- false,
+ false, // all_databases
+ false, // databases
+ NULL, // databasesSeq
// dump format option
- false, // schemeonly
+ false, // schemaonly
true, // with_property
false, // avro format
- -INT64_MAX, // start_time
+ -INT64_MAX + 1, // start_time
+ {0}, // humanStartTime
INT64_MAX, // end_time
+ {0}, // humanEndTime
"ms", // precision
1, // data_batch
TSDB_MAX_SQL_LEN, // max_sql_len
@@ -355,9 +380,49 @@ struct arguments g_args = {
false, // isDumpIn
false, // debug_print
false, // verbose_print
- false // performance_print
+ false, // performance_print
+ 0, // dbCount
};
+UNUSED_FUNC void errorWrongValue(char *program, char *wrong_arg, char *wrong_value)
+{
+ fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorUnrecognized(char *program, char *wrong_arg)
+{
+ fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg);
+ fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires an argument -- '%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg2(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option requires a number argument '-%s'\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
+static void errorPrintReqArg3(char *program, char *wrong_arg)
+{
+ fprintf(stderr,
+ "%s: option '%s' requires an argument\n",
+ program, wrong_arg);
+ fprintf(stderr,
+ "Try `taosdemo --help' or `taosdemo --usage' for more information.\n");
+}
+
/* Parse a single option. */
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
/* Get the input argument from argp_parse, which we
@@ -376,23 +441,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.user = arg;
break;
case 'p':
- g_args.password = arg;
break;
case 'P':
+ if (!isStringNumber(arg)) {
+ errorPrintReqArg2("taosdump", "P");
+ exit(EXIT_FAILURE);
+ }
g_args.port = atoi(arg);
break;
case 'q':
g_args.mysqlFlag = atoi(arg);
break;
- case 'v':
- if (wordexp(arg, &full_path, 0) != 0) {
- errorPrint("Invalid client vesion %s\n", arg);
- return -1;
- }
- tstrncpy(g_args.cversion, full_path.we_wordv[0], 11);
- wordfree(&full_path);
- break;
- // output file path
case 'o':
if (wordexp(arg, &full_path, 0) != 0) {
errorPrint("Invalid path %s\n", arg);
@@ -419,9 +478,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.resultFile = arg;
break;
case 'c':
+ if (0 == strlen(arg)) {
+ errorPrintReqArg3("taosdump", "-c or --config-dir");
+ exit(EXIT_FAILURE);
+ }
if (wordexp(arg, &full_path, 0) != 0) {
errorPrint("Invalid path %s\n", arg);
- return -1;
+ exit(EXIT_FAILURE);
}
tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN);
wordfree(&full_path);
@@ -431,7 +494,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break;
// dump unit option
case 'A':
- g_args.all_databases = true;
break;
case 'D':
g_args.databases = true;
@@ -443,17 +505,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'N':
g_args.with_property = false;
break;
- case 'V':
+ case 'v':
g_args.avro = true;
break;
case 'S':
// parse time here.
- g_args.start_time = atol(arg);
break;
case 'E':
- g_args.end_time = atol(arg);
- break;
- case 'C':
break;
case 'B':
g_args.data_batch = atoi(arg);
@@ -476,6 +534,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.table_batch = atoi(arg);
break;
case 'T':
+ if (!isStringNumber(arg)) {
+ errorPrint("%s", "\n\t-T need a number following!\n");
+ exit(EXIT_FAILURE);
+ }
g_args.thread_num = atoi(arg);
break;
case OPT_ABORT:
@@ -522,7 +584,7 @@ static int queryDbImpl(TAOS *taos, char *command) {
return 0;
}
-static void parse_precision_first(
+UNUSED_FUNC static void parse_precision_first(
int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-C") == 0) {
@@ -554,6 +616,135 @@ static void parse_precision_first(
}
}
+static void parse_args(
+ int argc, char *argv[], SArguments *arguments) {
+
+ for (int i = 1; i < argc; i++) {
+ if ((strncmp(argv[i], "-p", 2) == 0)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ if ((strlen(argv[i]) == 2)
+ || (strncmp(argv[i], "--password", 10) == 0)) {
+ printf("Enter password: ");
+ taosSetConsoleEcho(false);
+ if(scanf("%20s", arguments->password) > 1) {
+ errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__);
+ }
+ taosSetConsoleEcho(true);
+ } else {
+ tstrncpy(arguments->password, (char *)(argv[i] + 2),
+ SHELL_MAX_PASSWORD_LEN);
+ strcpy(argv[i], "-p");
+ }
+ } else if (strcmp(argv[i], "-gg") == 0) {
+ arguments->verbose_print = true;
+ strcpy(argv[i], "");
+ } else if (strcmp(argv[i], "-PP") == 0) {
+ arguments->performance_print = true;
+ strcpy(argv[i], "");
+ } else if ((strcmp(argv[i], "-A") == 0)
+ || (0 == strncmp(
+ argv[i], "--all-database",
+ strlen("--all-database")))) {
+ g_args.all_databases = true;
+ } else if ((strncmp(argv[i], "-D", strlen("-D")) == 0)
+ || (0 == strncmp(
+ argv[i], "--database",
+ strlen("--database")))) {
+ if (2 == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg(argv[0], "D");
+ exit(EXIT_FAILURE);
+ }
+ arguments->databasesSeq = argv[++i];
+ } else if (0 == strncmp(argv[i], "--databases=", strlen("--databases="))) {
+ arguments->databasesSeq = (char *)(argv[i] + strlen("--databases="));
+ } else if (0 == strncmp(argv[i], "-D", strlen("-D"))) {
+ arguments->databasesSeq = (char *)(argv[i] + strlen("-D"));
+ } else if (strlen("--databases") == strlen(argv[i])) {
+ if (argc == i+1) {
+ errorPrintReqArg3(argv[0], "--databases");
+ exit(EXIT_FAILURE);
+ }
+ arguments->databasesSeq = argv[++i];
+ } else {
+ errorUnrecognized(argv[0], argv[i]);
+ exit(EXIT_FAILURE);
+ }
+ g_args.databases = true;
+ } else {
+ continue;
+ }
+
+ }
+}
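
A minimal sketch of the masked password prompt used by the -p branch above, assuming POSIX termios; promptPassword() and its buffer handling are illustrative, not part of the patch (the patch itself routes this through taosSetConsoleEcho(), added further below for Linux, Darwin and Windows):

    #include <stdio.h>
    #include <string.h>
    #include <termios.h>
    #include <unistd.h>

    static void promptPassword(char *buf, size_t len) {
        struct termios oldt, newt;
        printf("Enter password: ");
        fflush(stdout);
        tcgetattr(STDIN_FILENO, &oldt);             /* remember current settings */
        newt = oldt;
        newt.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL);
        tcsetattr(STDIN_FILENO, TCSAFLUSH, &newt);  /* echo off while typing */
        if (fgets(buf, (int)len, stdin) != NULL) {
            buf[strcspn(buf, "\n")] = '\0';         /* drop the trailing newline */
        }
        tcsetattr(STDIN_FILENO, TCSAFLUSH, &oldt);  /* restore echo */
        printf("\n");
    }
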
+
+static void copyHumanTimeToArg(char *timeStr, bool isStartTime)
+{
+ if (isStartTime)
+ tstrncpy(g_args.humanStartTime, timeStr, sizeof(g_args.humanStartTime));
+ else
+ tstrncpy(g_args.humanEndTime, timeStr, sizeof(g_args.humanEndTime));
+}
+
+static void copyTimestampToArg(char *timeStr, bool isStartTime)
+{
+ if (isStartTime)
+ g_args.start_time = atol(timeStr);
+ else
+ g_args.end_time = atol(timeStr);
+}
+
+static void parse_timestamp(
+ int argc, char *argv[], SArguments *arguments) {
+ for (int i = 1; i < argc; i++) {
+ char *tmp;
+ bool isStartTime = false;
+ bool isEndTime = false;
+
+ if (strcmp(argv[i], "-S") == 0) {
+ isStartTime = true;
+ } else if (strcmp(argv[i], "-E") == 0) {
+ isEndTime = true;
+ }
+
+ if (isStartTime || isEndTime) {
+ if (NULL == argv[i+1]) {
+ errorPrint("%s need a valid value following!\n", argv[i]);
+ exit(-1);
+ }
+ tmp = strdup(argv[i+1]);
+
+ if (strchr(tmp, ':') && strchr(tmp, '-')) {
+ copyHumanTimeToArg(tmp, isStartTime);
+ } else {
+ copyTimestampToArg(tmp, isStartTime);
+ }
+ }
+ }
+}
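
parse_timestamp() above classifies an -S/-E value by shape: anything containing both ':' and '-' is kept as a human-readable string, everything else is read as a raw epoch number. The human-readable form is converted later, in taosDumpTableData(), once the database's own precision is known. Two illustrative invocations (values are examples only):

    taosdump -S 1506787200000 -E 1506873600000
    taosdump -S '2017-10-01 00:00:00.000+0800' -E '2017-10-02 00:00:00.000+0800'
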
+
+static int getPrecisionByString(char *precision)
+{
+ if (0 == strncasecmp(precision,
+ "ms", 2)) {
+ return TSDB_TIME_PRECISION_MILLI;
+ } else if (0 == strncasecmp(precision,
+ "us", 2)) {
+ return TSDB_TIME_PRECISION_MICRO;
+#if TSDB_SUPPORT_NANOSECOND == 1
+ } else if (0 == strncasecmp(precision,
+ "ns", 2)) {
+ return TSDB_TIME_PRECISION_NANO;
+#endif
+ } else {
+ errorPrint("Invalid time precision: %s",
+ precision);
+ }
+
+ return -1;
+}
+
+/*
static void parse_timestamp(
int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
@@ -572,6 +763,7 @@ static void parse_timestamp(
int64_t tmpEpoch;
if (strchr(tmp, ':') && strchr(tmp, '-')) {
+ strcpy(g_args.humanStartTime, tmp)
int32_t timePrec;
if (0 == strncasecmp(arguments->precision,
"ms", strlen("ms"))) {
@@ -610,15 +802,20 @@ static void parse_timestamp(
}
}
}
+*/
int main(int argc, char *argv[]) {
+ static char verType[32] = {0};
+ sprintf(verType, "version: %s\n", version);
+ argp_program_version = verType;
int ret = 0;
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
- if (argc > 2) {
- parse_precision_first(argc, argv, &g_args);
+ if (argc > 1) {
+// parse_precision_first(argc, argv, &g_args);
parse_timestamp(argc, argv, &g_args);
+ parse_args(argc, argv, &g_args);
}
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@@ -637,7 +834,6 @@ int main(int argc, char *argv[]) {
printf("user: %s\n", g_args.user);
printf("password: %s\n", g_args.password);
printf("port: %u\n", g_args.port);
- printf("cversion: %s\n", g_args.cversion);
printf("mysqlFlag: %d\n", g_args.mysqlFlag);
printf("outpath: %s\n", g_args.outpath);
printf("inpath: %s\n", g_args.inpath);
@@ -645,11 +841,14 @@ int main(int argc, char *argv[]) {
printf("encode: %s\n", g_args.encode);
printf("all_databases: %s\n", g_args.all_databases?"true":"false");
printf("databases: %d\n", g_args.databases);
+ printf("databasesSeq: %s\n", g_args.databasesSeq);
printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
printf("with_property: %s\n", g_args.with_property?"true":"false");
printf("avro format: %s\n", g_args.avro?"true":"false");
printf("start_time: %" PRId64 "\n", g_args.start_time);
+ printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time);
+ printf("human readable end time: %s \n", g_args.humanEndTime);
printf("precision: %s\n", g_args.precision);
printf("data_batch: %d\n", g_args.data_batch);
printf("max_sql_len: %d\n", g_args.max_sql_len);
@@ -666,11 +865,6 @@ int main(int argc, char *argv[]) {
}
}
printf("==============================\n");
-
- if (g_args.cversion[0] != 0){
- tstrncpy(version, g_args.cversion, 11);
- }
-
if (taosCheckParam(&g_args) < 0) {
exit(EXIT_FAILURE);
}
@@ -688,7 +882,6 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "user: %s\n", g_args.user);
fprintf(g_fpOfResult, "password: %s\n", g_args.password);
fprintf(g_fpOfResult, "port: %u\n", g_args.port);
- fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
@@ -696,11 +889,14 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "encode: %s\n", g_args.encode);
fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false");
fprintf(g_fpOfResult, "databases: %d\n", g_args.databases);
+ fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
+ fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
+ fprintf(g_fpOfResult, "human readable end time: %s \n", g_args.humanEndTime);
fprintf(g_fpOfResult, "precision: %s\n", g_args.precision);
fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch);
fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len);
@@ -757,7 +953,8 @@ int main(int argc, char *argv[]) {
static void taosFreeDbInfos() {
if (g_dbInfos == NULL) return;
- for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]);
+ for (int i = 0; i < g_args.dbCount; i++)
+ tfree(g_dbInfos[i]);
tfree(g_dbInfos);
}
@@ -987,6 +1184,90 @@ static int32_t taosSaveTableOfMetricToTempFile(
return 0;
}
+static int inDatabasesSeq(
+ char *name,
+ int len)
+{
+ if (strstr(g_args.databasesSeq, ",") == NULL) {
+ if (0 == strncmp(g_args.databasesSeq, name, len)) {
+ return 0;
+ }
+ } else {
+ char *dupSeq = strdup(g_args.databasesSeq);
+ char *running = dupSeq;
+ char *dbname = strsep(&running, ",");
+ while (dbname) {
+ if (0 == strncmp(dbname, name, len)) {
+ tfree(dupSeq);
+ return 0;
+ }
+
+ dbname = strsep(&running, ",");
+ }
+
+ }
+
+ return -1;
+}
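
A usage sketch for the comma-separated -D list check above; the database names and the demo wrapper are illustrative only. strsep() consumes the string it walks, which is why the function duplicates the list with strdup() before scanning:

    #ifdef DATABASES_SEQ_DEMO
    static int demoDatabasesSeq(void) {
        g_args.databasesSeq = "power,meters,cars";
        /* returns 0 when the name is in the -D list, -1 otherwise */
        int hit  = inDatabasesSeq("meters", (int)strlen("meters"));
        int miss = inDatabasesSeq("logs",   (int)strlen("logs"));
        return (hit == 0 && miss == -1) ? 0 : 1;
    }
    #endif
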
+
+static int getDbCount()
+{
+ int count = 0;
+
+ TAOS *taos = NULL;
+ TAOS_RES *result = NULL;
+ char *command = "show databases";
+ TAOS_ROW row;
+
+ /* Connect to server */
+ taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (NULL == taos) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ return 0;
+ }
+
+ result = taos_query(taos, command);
+ int32_t code = taos_errno(result);
+
+ if (0 != code) {
+ errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(result));
+ return 0;
+ }
+
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+
+ while ((row = taos_fetch_row(result)) != NULL) {
+ // skip the sys database 'log' unless -a/--allow-sys is specified
+ if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ && (!g_args.allow_sys)) {
+ continue;
+ }
+
+ if (g_args.databases) { // input multi dbs
+ if (inDatabasesSeq(
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
+ continue;
+ } else if (!g_args.all_databases) { // only input one db
+ if (strncasecmp(g_args.arg_list[0],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
+ continue;
+ }
+
+ count++;
+ }
+
+ if (count == 0) {
+ errorPrint("%d databases valid to dump\n", count);
+ }
+
+ return count;
+}
+
static int taosDumpOut() {
TAOS *taos = NULL;
TAOS_RES *result = NULL;
@@ -1011,7 +1292,14 @@ static int taosDumpOut() {
return -1;
}
- g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *));
+ g_args.dbCount = getDbCount();
+
+ if (0 == g_args.dbCount) {
+ errorPrint("%d databases valid to dump\n", g_args.dbCount);
+ return -1;
+ }
+
+ g_dbInfos = (SDbInfo **)calloc(g_args.dbCount, sizeof(SDbInfo *));
if (g_dbInfos == NULL) {
errorPrint("%s() LN%d, failed to allocate memory\n",
__func__, __LINE__);
@@ -1058,24 +1346,18 @@ static int taosDumpOut() {
}
if (g_args.databases) { // input multi dbs
- for (int i = 0; g_args.arg_list[i]; i++) {
- if (strncasecmp(g_args.arg_list[i],
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
+ if (inDatabasesSeq(
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
+ continue;
}
- continue;
} else if (!g_args.all_databases) { // only input one db
if (strncasecmp(g_args.arg_list[0],
(char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- goto _dump_db_point;
- else
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
continue;
}
-_dump_db_point:
-
g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
if (g_dbInfos[count] == NULL) {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
@@ -1083,38 +1365,55 @@ _dump_db_point:
goto _exit_failure;
}
+ okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
+ min(TSDB_DB_NAME_LEN,
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
if (g_args.with_property) {
- g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- g_dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
-
- tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ g_dbInfos[count]->ntables =
+ *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ g_dbInfos[count]->vgroups =
+ *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ g_dbInfos[count]->replica =
+ *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ g_dbInfos[count]->quorum =
+ *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ g_dbInfos[count]->days =
+ *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+
+ tstrncpy(g_dbInfos[count]->keeplist,
+ (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
//g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//g_dbInfos[count]->daysToKeep1;
//g_dbInfos[count]->daysToKeep2;
- g_dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- g_dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- g_dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- g_dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- g_dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- g_dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
-
- tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes + 1));
- //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
- g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
+ g_dbInfos[count]->cache =
+ *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ g_dbInfos[count]->blocks =
+ *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ g_dbInfos[count]->minrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ g_dbInfos[count]->maxrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ g_dbInfos[count]->wallevel =
+ *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ g_dbInfos[count]->fsync =
+ *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ g_dbInfos[count]->comp =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ g_dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+
+ tstrncpy(g_dbInfos[count]->precision,
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ DB_PRECISION_LEN);
+ g_dbInfos[count]->update =
+ *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
count++;
if (g_args.databases) {
- if (count > g_args.arg_list_len) break;
+ if (count > g_args.dbCount) break;
} else if (!g_args.all_databases) {
if (count >= 1) break;
@@ -1131,7 +1430,7 @@ _dump_db_point:
taosDumpDb(g_dbInfos[i], fp, taos);
}
} else {
- if (g_args.arg_list_len == 1) { // case: taosdump
+ if (g_args.dbCount == 1) { // case: taosdump
taosDumpDb(g_dbInfos[0], fp, taos);
} else { // case: taosdump tablex tabley ...
taosDumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
@@ -1150,14 +1449,14 @@ _dump_db_point:
fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name);
- int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0
+ int32_t totalNumOfThread = 1; // 0: all normal table into .tables.tmp.0
int normalTblFd = -1;
int32_t retCode;
int superTblCnt = 0 ;
for (int i = 1; g_args.arg_list[i]; i++) {
if (taosGetTableRecordInfo(g_args.arg_list[i],
&tableRecordInfo, taos) < 0) {
- errorPrint("input the invalide table %s\n",
+ errorPrint("input the invalid table %s\n",
g_args.arg_list[i]);
continue;
}
@@ -1204,8 +1503,10 @@ _dump_db_point:
}
// start multi threads to dumpout
+
taosStartDumpOutWorkThreads(totalNumOfThread,
- g_dbInfos[0]->name);
+ g_dbInfos[0]->name,
+ getPrecisionByString(g_dbInfos[0]->precision));
char tmpFileName[MAX_FILE_NAME_LEN];
_clean_tmp_file:
@@ -1282,11 +1583,10 @@ static int taosGetTableDes(
return count;
}
- // if chidl-table have tag, using select tagName from table to get tagValue
+ // if the child table has tags, use "select tagName from table" to get the tag values
for (int i = 0 ; i < count; i++) {
if (strcmp(stableDes->cols[i].note, "TAG") != 0) continue;
-
sprintf(sqlstr, "select %s from %s.%s",
stableDes->cols[i].field, dbName, table);
@@ -1395,7 +1695,7 @@ static int convertSchemaToAvroSchema(STableDef *stableDes, char **avroSchema)
static int32_t taosDumpTable(
char *tbName, char *metric,
- FILE *fp, TAOS* taosCon, char* dbName) {
+ FILE *fp, TAOS* taosCon, char* dbName, int precision) {
int count = 0;
STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef)
@@ -1446,7 +1746,7 @@ static int32_t taosDumpTable(
int32_t ret = 0;
if (!g_args.schemaonly) {
- ret = taosDumpTableData(fp, tbName, taosCon, dbName,
+ ret = taosDumpTableData(fp, tbName, taosCon, dbName, precision,
jsonAvroSchema);
}
@@ -1537,7 +1837,8 @@ static void* taosDumpOutWorkThreadFp(void *arg)
int ret = taosDumpTable(
tableRecord.name, tableRecord.metric,
- fp, pThread->taosCon, pThread->dbName);
+ fp, pThread->taosCon, pThread->dbName,
+ pThread->precision);
if (ret >= 0) {
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
@@ -1586,7 +1887,7 @@ static void* taosDumpOutWorkThreadFp(void *arg)
return NULL;
}
-static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
+static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName, int precision)
{
pthread_attr_t thattr;
SThreadParaObj *threadObj =
@@ -1605,6 +1906,7 @@ static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName)
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN);
+ pThread->precision = precision;
pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password,
NULL, g_args.port);
if (pThread->taosCon == NULL) {
@@ -1854,7 +2156,8 @@ static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) {
}
// start multi threads to dumpout
- taosStartDumpOutWorkThreads(numOfThread, dbInfo->name);
+ taosStartDumpOutWorkThreads(numOfThread, dbInfo->name,
+ getPrecisionByString(dbInfo->precision));
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
@@ -2132,14 +2435,38 @@ static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbN
}
static int taosDumpTableData(FILE *fp, char *tbName,
- TAOS* taosCon, char* dbName,
+ TAOS* taosCon, char* dbName, int precision,
char *jsonAvroSchema) {
int64_t totalRows = 0;
char sqlstr[1024] = {0};
+
+ int64_t start_time, end_time;
+ if (strlen(g_args.humanStartTime)) {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
+ precision, 0)) {
+ errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
+ return -1;
+ }
+ } else {
+ start_time = g_args.start_time;
+ }
+
+ if (strlen(g_args.humanEndTime)) {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
+ precision, 0)) {
+ errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
+ return -1;
+ }
+ } else {
+ end_time = g_args.end_time;
+ }
+
sprintf(sqlstr,
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbName, g_args.start_time, g_args.end_time);
+ dbName, tbName, start_time, end_time);
TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
@@ -2172,12 +2499,6 @@ static int taosCheckParam(struct arguments *arguments) {
return -1;
}
- if (g_args.arg_list_len == 0) {
- if ((!g_args.all_databases) && (!g_args.isDumpIn)) {
- errorPrint("%s", "taosdump requires parameters for database and operation\n");
- return -1;
- }
- }
/*
if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
fprintf(stderr, "duplicate parameter input and output file path\n");
@@ -2384,7 +2705,7 @@ static int taosGetFilesNum(const char *directoryName,
}
if (fileNum <= 0) {
- errorPrint("directory:%s is empry\n", directoryName);
+ errorPrint("directory:%s is empty\n", directoryName);
exit(-1);
}
@@ -2561,9 +2882,9 @@ static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
memcpy(cmd + cmd_len, line, read_len);
cmd[read_len + cmd_len]= '\0';
if (queryDbImpl(taos, cmd)) {
- errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n",
+ errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
__func__, __LINE__, lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName);
+ fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c
index 33d779dfcf7abd1315107fd45f79eaed57581768..ddb9e660af4b4c479c0d8bc4b8be47c9f900dfce 100644
--- a/src/kit/taospack/taospack.c
+++ b/src/kit/taospack/taospack.c
@@ -18,6 +18,7 @@
#include
#include
+
#if defined(WINDOWS)
int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3 for windows.\n");
@@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
if ( ++fi == malloc_cnt ) {
malloc_cnt += 100000;
- floats = realloc(floats, malloc_cnt*sizeof(float));
+ float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
+ if(floats1 == NULL)
+ break;
+ floats = floats1;
}
memset(buf, 0, sizeof(buf));
}
@@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){
}
-
void unitTestFloat() {
float ft1 [] = {1.11, 2.22, 3.333};
@@ -662,7 +665,50 @@ void unitTestFloat() {
free(ft2);
free(buff);
free(output);
-
+}
+
+void leakFloat() {
+
+ int cnt = sizeof(g_ft1)/sizeof(float);
+ float* floats = g_ft1;
+ int algorithm = 2;
+
+ // compress
+ const char* input = (const char*)floats;
+ int input_len = cnt * sizeof(float);
+ int output_len = input_len + 1024;
+ char* output = (char*) malloc(output_len);
+ char* buff = (char*) malloc(input_len);
+ int buff_len = input_len;
+
+ int ret_len = 0;
+ ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
+
+ if(ret_len == 0) {
+ printf(" compress float error.\n");
+ free(buff);
+ free(output);
+ return ;
+ }
+
+ float* ft2 = (float*)malloc(input_len);
+ ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
+ if(ret_len == 0) {
+ printf(" decompress float error.\n");
+ }
+
+ free(ft2);
+ free(buff);
+ free(output);
+}
+
+
+void leakTest(){
+ for(int i=0; i< 90000000000000; i++){
+ if(i%10000==0)
+ printf(" ---------- %d ---------------- \n", i);
+ leakFloat();
+ }
}
#define DB_CNT 500
@@ -689,7 +735,7 @@ extern char Compressor [];
// ----------------- main ----------------------
//
int main(int argc, char *argv[]) {
- printf("welcome to use taospack tools v1.3\n");
+ printf("welcome to use taospack tools v1.6\n");
//printf(" sizeof(int)=%d\n", (int)sizeof(int));
//printf(" sizeof(long)=%d\n", (int)sizeof(long));
@@ -753,6 +799,9 @@ int main(int argc, char *argv[]) {
if(strcmp(argv[1], "-mem") == 0) {
memTest();
}
+ else if(strcmp(argv[1], "-leak") == 0) {
+ leakTest();
+ }
}
else{
unitTestFloat();
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index 5521267841f06d139ef8b4ac7a0c8883774e667f..5acc8dd85eb7fe8cd3f4b17f47e06161e39a6dc4 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -274,6 +274,7 @@ typedef struct {
int32_t rowSize;
int32_t numOfRows;
void * pIter;
+ void * pVgIter;
void ** ppShow;
int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS];
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 73be7ea045f5255908f39d669d0fba15e630c9fc..7dd199cca4248e5467017e1b6247f3b534c45711 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -196,14 +196,20 @@ int32_t mnodeInitDnodes() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DNODE, mnodeProcessCreateDnodeMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DNODE, mnodeProcessDropDnodeMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CONFIG_DNODE, mnodeProcessCfgDnodeMsg);
+
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP, mnodeProcessCfgDnodeMsgRsp);
mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_STATUS, mnodeProcessDnodeStatusMsg);
+
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_MODULE, mnodeGetModuleMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_MODULE, mnodeRetrieveModules);
+
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeGetConfigMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeRetrieveConfigs);
+
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VNODES, mnodeGetVnodeMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VNODES, mnodeRetrieveVnodes);
+ mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_VNODES, mnodeCancelGetNextVgroup);
+
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DNODE, mnodeGetDnodeMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DNODE, mnodeRetrieveDnodes);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DNODE, mnodeCancelGetNextDnode);
@@ -298,7 +304,7 @@ void *mnodeGetDnodeByEp(char *ep) {
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
- if (strcmp(ep, pDnode->dnodeEp) == 0) {
+ if (strncasecmp(ep, pDnode->dnodeEp, TSDB_EP_LEN) == 0) {
mnodeCancelGetNextDnode(pIter);
return pDnode;
}
@@ -1199,7 +1205,12 @@ static int32_t mnodeGetVnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
SDnodeObj *pDnode = NULL;
if (pShow->payloadLen > 0 ) {
- pDnode = mnodeGetDnodeByEp(pShow->payload);
+ char ep[TSDB_EP_LEN] = {0};
+ // use strncpy instead of tstrncpy here to avoid a runtime warning
+ uint16_t len = (pShow->payloadLen + 1) > TSDB_EP_LEN ? TSDB_EP_LEN :(pShow->payloadLen + 1);
+ strncpy(ep, pShow->payload, len - 1);
+
+ pDnode = mnodeGetDnodeByEp(ep);
} else {
void *pIter = mnodeGetNextDnode(NULL, (SDnodeObj **)&pDnode);
mnodeCancelGetNextDnode(pIter);
@@ -1227,13 +1238,12 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo
pDnode = (SDnodeObj *)(pShow->pIter);
if (pDnode != NULL) {
- void *pIter = NULL;
SVgObj *pVgroup;
while (1) {
- pIter = mnodeGetNextVgroup(pIter, &pVgroup);
+ pShow->pVgIter = mnodeGetNextVgroup(pShow->pVgIter, &pVgroup);
if (pVgroup == NULL) break;
- for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
+ for (int32_t i = 0; i < pVgroup->numOfVnodes && numOfRows < rows; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->pDnode == pDnode) {
cols = 0;
@@ -1245,10 +1255,13 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, syncRole[pVgid->role]);
cols++;
-
numOfRows++;
+
}
}
+ if (numOfRows >= rows) {
+ break;
+ }
mnodeDecVgroupRef(pVgroup);
}
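
The retrieval loop above now keeps the vgroup iterator in pShow->pVgIter instead of a stack variable, and stops as soon as the requested number of rows is produced, so 'show vnodes "ep"' can resume from the same position on the next retrieve call. A minimal sketch of the resumable-cursor pattern; demoGetNext() and demoWriteRow() are illustrative stand-ins, not the mnode API:

    extern void *demoGetNext(void **iter);   /* advances *iter, NULL when done */
    extern void  demoWriteRow(void *item);   /* emits one result row */

    typedef struct DemoCursor {
        void *iter;                          /* survives across retrieve calls */
    } DemoCursor;

    static int retrievePage(DemoCursor *cur, int rows) {
        int n = 0;
        while (n < rows) {
            void *item = demoGetNext(&cur->iter);
            if (item == NULL) break;         /* exhausted: cursor is done */
            demoWriteRow(item);
            n++;
        }
        return n;  /* the caller issues another retrieve while n == rows */
    }
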
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 570f5c344b624eea1f23fd13f11bfc6e230c61d5..bbfdb52e058c9e8c4ce3d3fe9d06715e9c0483aa 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -422,8 +422,13 @@ static void* mnodePutShowObj(SShowObj *pShow) {
static void mnodeFreeShowObj(void *data) {
SShowObj *pShow = *(SShowObj **)data;
- if (tsMnodeShowFreeIterFp[pShow->type] != NULL && pShow->pIter != NULL) {
- (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter);
+ if (tsMnodeShowFreeIterFp[pShow->type] != NULL) {
+ if (pShow->pVgIter != NULL) {
+ // only used in 'show vnodes "ep"'
+ (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pVgIter);
+ } else {
+ if (pShow->pIter != NULL) (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter);
+ }
}
mDebug("%p, show is destroyed, data:%p index:%d", pShow, data, pShow->index);
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 0bc114ffdfe8d59f4941536b56bd95be96a03d0b..a6158906a7cc77b57244594fe51881e5df0b68c8 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code), pStable->numOfTags);
-
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code));
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1321,6 +1326,9 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code));
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1376,6 +1384,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code));
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1444,6 +1455,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code));
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1489,6 +1503,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
tstrerror(code));
+ if (code == TSDB_CODE_SUCCESS) {
+ code = mnodeGetSuperTableMeta(pMsg);
+ }
return code;
}
@@ -1518,6 +1535,13 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
// update
SSchema *schema = (SSchema *) (pStable->schema + col);
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+
+ if (pAlter->schema[0].bytes <= schema->bytes) {
+ mError("msg:%p, app:%p stable:%s, modify column len. column:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
+ pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
+ return TSDB_CODE_MND_INVALID_COLUMN_LENGTH;
+ }
+
schema->bytes = pAlter->schema[0].bytes;
pStable->sversion++;
mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
@@ -1548,6 +1572,12 @@ static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) {
// update
SSchema *schema = (SSchema *) (pStable->schema + col + pStable->numOfColumns);
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
+ if (pAlter->schema[0].bytes <= schema->bytes) {
+ mError("msg:%p, app:%p stable:%s, modify tag len. tag:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
+ pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
+ return TSDB_CODE_MND_INVALID_TAG_LENGTH;
+ }
+
schema->bytes = pAlter->schema[0].bytes;
pStable->tversion++;
mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
@@ -2921,10 +2951,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2;
}
- pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
- if (pMultiMeta == NULL) {
+ SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
+ if (pMultiMeta1 == NULL) {
return NULL;
}
+ pMultiMeta = pMultiMeta1;
}
return pMultiMeta;
diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h
index e7a3ec13ae1b3ff9a94724a4cd28e2b4bb40ac1c..4b7925074075b7eef04d2281d3ba7649740bae1e 100644
--- a/src/os/inc/osSystem.h
+++ b/src/os/inc/osSystem.h
@@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename);
void* taosLoadSym(void* handle, char* name);
void taosCloseDll(void *handle);
+int taosSetConsoleEcho(bool on);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c
index 17cafdd6644ed2adbb0a402689f9f300fa615d03..f152e36d7b9f41e5ddf97db3f5d0c4cf2d714632 100644
--- a/src/os/src/darwin/darwinSystem.c
+++ b/src/os/src/darwin/darwinSystem.c
@@ -29,4 +29,28 @@ void* taosLoadSym(void* handle, char* name) {
void taosCloseDll(void *handle) {
}
+int taosSetConsoleEcho(bool on)
+{
+#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL)
+ int err;
+ struct termios term;
+
+ if (tcgetattr(STDIN_FILENO, &term) == -1) {
+ perror("Cannot get the attribution of the terminal");
+ return -1;
+ }
+
+ if (on)
+ term.c_lflag|=ECHOFLAGS;
+ else
+ term.c_lflag &=~ECHOFLAGS;
+
+ err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term);
+ if (err == -1 && err == EINTR) {
+ perror("Cannot set the attribution of the terminal");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c
index d8194feab4011501097fdcce937d88b87ea41af0..22954f1523dc229b8446e339a2d142a0b4b7b023 100644
--- a/src/os/src/detail/osMemory.c
+++ b/src/os/src/detail/osMemory.c
@@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
void * tptr = (void *)((char *)ptr - sizeof(size_t));
size_t tsize = size + sizeof(size_t);
- tptr = realloc(tptr, tsize);
- if (tptr == NULL) return NULL;
+ void* tptr1 = realloc(tptr, tsize);
+ if (tptr1 == NULL) return NULL;
+ tptr = tptr1;
*(size_t *)tptr = size;
diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c
index b7b268b19e6b6f92babb74cfd3f23793be037cd0..35ca64d79f8b7a883014fd6ca980300ede22d6e2 100644
--- a/src/os/src/linux/linuxEnv.c
+++ b/src/os/src/linux/linuxEnv.c
@@ -32,6 +32,13 @@ void osInit() {
strcpy(tsDataDir, "/var/lib/tq");
strcpy(tsLogDir, "/var/log/tq");
strcpy(tsScriptDir, "/etc/tq");
+#elif (_TD_PRO_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "/etc/ProDB");
+ }
+ strcpy(tsDataDir, "/var/lib/ProDB");
+ strcpy(tsLogDir, "/var/log/ProDB");
+ strcpy(tsScriptDir, "/etc/ProDB");
#else
if (configDir[0] == 0) {
strcpy(configDir, "/etc/taos");
@@ -50,14 +57,20 @@ void osInit() {
char* taosGetCmdlineByPID(int pid) {
static char cmdline[1024];
sprintf(cmdline, "/proc/%d/cmdline", pid);
- FILE* f = fopen(cmdline, "r");
- if (f) {
- size_t size;
- size = fread(cmdline, sizeof(char), 1024, f);
- if (size > 0) {
- if ('\n' == cmdline[size - 1]) cmdline[size - 1] = '\0';
- }
- fclose(f);
+
+ int fd = open(cmdline, O_RDONLY);
+ if (fd >= 0) {
+ int n = read(fd, cmdline, sizeof(cmdline) - 1);
+ if (n < 0) n = 0;
+
+ if (n > 0 && cmdline[n - 1] == '\n') --n;
+
+ cmdline[n] = 0;
+
+ close(fd);
+ } else {
+ cmdline[0] = 0;
}
+
return cmdline;
}
diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c
index 052b7a22a8e9fe5dafffc4a90c8efa3643e4a06c..a82149dccb1f71e6fbdc2b62d066f04ff52c251d 100644
--- a/src/os/src/linux/osSystem.c
+++ b/src/os/src/linux/osSystem.c
@@ -51,4 +51,28 @@ void taosCloseDll(void *handle) {
}
}
+int taosSetConsoleEcho(bool on)
+{
+#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL)
+ int err;
+ struct termios term;
+
+ if (tcgetattr(STDIN_FILENO, &term) == -1) {
+ perror("Cannot get the attribution of the terminal");
+ return -1;
+ }
+
+ if (on)
+ term.c_lflag |= ECHOFLAGS;
+ else
+ term.c_lflag &= ~ECHOFLAGS;
+
+ err = tcsetattr(STDIN_FILENO, TCSAFLUSH, &term);
+ if (err == -1 || err == EINTR) {
+ perror("Cannot set the attribution of the terminal");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c
index b35cb8f040aec5ff4b4fb12665d0842e72958ba1..6f46bb43c75ff2c9735fc53a11bce585c1c213f6 100644
--- a/src/os/src/windows/wEnv.c
+++ b/src/os/src/windows/wEnv.c
@@ -39,6 +39,14 @@ void osInit() {
strcpy(tsDataDir, "C:/TQ/data");
strcpy(tsLogDir, "C:/TQ/log");
strcpy(tsScriptDir, "C:/TQ/script");
+#elif (_TD_PRO_ == true)
+ if (configDir[0] == 0) {
+ strcpy(configDir, "C:/ProDB/cfg");
+ }
+ strcpy(tsVnodeDir, "C:/ProDB/data");
+ strcpy(tsDataDir, "C:/ProDB/data");
+ strcpy(tsLogDir, "C:/ProDB/log");
+ strcpy(tsScriptDir, "C:/ProDB/script");
#else
if (configDir[0] == 0) {
strcpy(configDir, "C:/TDengine/cfg");
diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c
index 553aecaf0a9be65fb5d532668d28ee1a77fbbd39..aa458548843b977ac7fed4070047ac8d693b2d31 100644
--- a/src/os/src/windows/wGetline.c
+++ b/src/os/src/windows/wGetline.c
@@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
*n += MIN_CHUNK;
nchars_avail = (int32_t)(*n + *lineptr - read_pos);
- *lineptr = realloc(*lineptr, *n);
- if (!*lineptr) {
+ char* lineptr1 = realloc(*lineptr, *n);
+ if (!lineptr1) {
errno = ENOMEM;
return -1;
}
+ *lineptr = lineptr1;
+
read_pos = *n - nchars_avail + *lineptr;
assert((*lineptr + *n) == (read_pos + nchars_avail));
}
diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c
index 17cafdd6644ed2adbb0a402689f9f300fa615d03..564005f79be71c9e8649442e4b29651cdfd993fd 100644
--- a/src/os/src/windows/wSystem.c
+++ b/src/os/src/windows/wSystem.c
@@ -30,3 +30,17 @@ void taosCloseDll(void *handle) {
}
+int taosSetConsoleEcho(bool on)
+{
+ HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
+ DWORD mode = 0;
+ GetConsoleMode(hStdin, &mode );
+ if (on) {
+ mode |= ENABLE_ECHO_INPUT;
+ } else {
+ mode &= ~ENABLE_ECHO_INPUT;
+ }
+ SetConsoleMode(hStdin, mode);
+
+ return 0;
+}
diff --git a/src/os/tests/CMakeLists.txt b/src/os/tests/CMakeLists.txt
index 3c477641899994bf34237e93122c3d83f0365fad..9ec5076b7201b2d5ed9b2b6eb682eea7d6a83827 100644
--- a/src/os/tests/CMakeLists.txt
+++ b/src/os/tests/CMakeLists.txt
@@ -17,5 +17,5 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(osTest ${SOURCE_LIST})
- TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread)
+ TARGET_LINK_LIBRARIES(osTest taos os cJson tutil common gtest pthread)
ENDIF()
diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt
index 57fc2ee3a2692c239d7fa36d6e55ddae738a2720..89fdc141b66adafb9f882dd6f59eca54053aff6c 100644
--- a/src/plugins/http/CMakeLists.txt
+++ b/src/plugins/http/CMakeLists.txt
@@ -6,6 +6,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h
index 0a5822b90893861eb12aea756bf877bc81730413..6c567e23bc817957d7f376ef101f8e5ca88559e6 100644
--- a/src/plugins/http/inc/httpInt.h
+++ b/src/plugins/http/inc/httpInt.h
@@ -147,9 +147,11 @@ typedef struct HttpContext {
int32_t state;
uint8_t reqType;
uint8_t parsed;
+ uint8_t error;
char ipstr[22];
char user[TSDB_USER_LEN]; // parsed from auth token or login message
char pass[HTTP_PASSWORD_LEN];
+ char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
TAOS * taos;
void * ppContext;
HttpSession *session;
diff --git a/src/plugins/http/inc/httpRestHandle.h b/src/plugins/http/inc/httpRestHandle.h
index 632a1dc64739e39d1e9671fd41f9c224597eff07..df405685e91a520d8b948fdc056d9386626368a2 100644
--- a/src/plugins/http/inc/httpRestHandle.h
+++ b/src/plugins/http/inc/httpRestHandle.h
@@ -22,12 +22,12 @@
#include "httpResp.h"
#include "httpSql.h"
-#define REST_ROOT_URL_POS 0
-#define REST_ACTION_URL_POS 1
-#define REST_USER_URL_POS 2
-#define REST_PASS_URL_POS 3
+#define REST_ROOT_URL_POS 0
+#define REST_ACTION_URL_POS 1
+#define REST_USER_USEDB_URL_POS 2
+#define REST_PASS_URL_POS 3
void restInitHandle(HttpServer* pServer);
bool restProcessRequest(struct HttpContext* pContext);
-#endif
\ No newline at end of file
+#endif
diff --git a/src/plugins/http/inc/httpUtil.h b/src/plugins/http/inc/httpUtil.h
index 54c95b6980f8241c3ea6c8e563e0e42c7c737286..21690ebca96d35423e126a9e747d8ce6bb5a43a0 100644
--- a/src/plugins/http/inc/httpUtil.h
+++ b/src/plugins/http/inc/httpUtil.h
@@ -17,6 +17,7 @@
#define TDENGINE_HTTP_UTIL_H
bool httpCheckUsedbSql(char *sql);
+bool httpCheckAlterSql(char *sql);
void httpTimeToString(int32_t t, char *buf, int32_t buflen);
bool httpUrlMatch(HttpContext *pContext, int32_t pos, char *cmp);
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index 51adef11b9af3ebb83537024edbb3ba369aaeb03..11945453c56ab7fdd1fc8b0c4f2510bbbdda1a6e 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -188,11 +188,12 @@ void httpCloseContextByApp(HttpContext *pContext) {
pContext->parsed = false;
bool keepAlive = true;
- if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) {
+ if (pContext->error == true) {
+ keepAlive = false;
+ } else if (parser && parser->httpVersion == HTTP_VERSION_10 && parser->keepAlive != HTTP_KEEPALIVE_ENABLE) {
keepAlive = false;
} else if (parser && parser->httpVersion != HTTP_VERSION_10 && parser->keepAlive == HTTP_KEEPALIVE_DISABLE) {
keepAlive = false;
- } else {
}
if (keepAlive) {
diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c
index d51c774ff269d5790868727941a632d133dd6733..9719d93824b50064ec1cf23677c641428434592c 100644
--- a/src/plugins/http/src/httpHandle.c
+++ b/src/plugins/http/src/httpHandle.c
@@ -35,6 +35,7 @@ bool httpProcessData(HttpContext* pContext) {
if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING)) {
httpTrace("context:%p, fd:%d, state:%s not in ready state, stop process request", pContext, pContext->fd,
httpContextStateStr(pContext->state));
+ pContext->error = true;
httpCloseContextByApp(pContext);
return false;
}
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index 3c72b795eef69186ef4e6308937678589224c60d..86e0f2f40beffdf7d035ba3329d51bb69c2cf796 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -272,26 +272,35 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: {
+ mod = ((t) % 1000 + 1000) % 1000;
+ if (t < 0 && mod != 0) {
+ t -= 1000;
+ }
quot = t / 1000;
fractionLen = 5;
format = ".%03" PRId64;
- mod = t % 1000;
break;
}
case TSDB_TIME_PRECISION_MICRO: {
+ mod = ((t) % 1000000 + 1000000) % 1000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000;
+ }
quot = t / 1000000;
fractionLen = 8;
format = ".%06" PRId64;
- mod = t % 1000000;
break;
}
case TSDB_TIME_PRECISION_NANO: {
+ mod = ((t) % 1000000000 + 1000000000) % 1000000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000000;
+ }
quot = t / 1000000000;
fractionLen = 11;
format = ".%09" PRId64;
- mod = t % 1000000000;
break;
}
@@ -319,26 +328,35 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: {
+ mod = ((t) % 1000 + 1000) % 1000;
+ if (t < 0 && mod != 0) {
+ t -= 1000;
+ }
quot = t / 1000;
fractionLen = 5;
format = ".%03" PRId64;
- mod = t % 1000;
break;
}
case TSDB_TIME_PRECISION_MICRO: {
+ mod = ((t) % 1000000 + 1000000) % 1000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000;
+ }
quot = t / 1000000;
fractionLen = 8;
format = ".%06" PRId64;
- mod = t % 1000000;
break;
}
case TSDB_TIME_PRECISION_NANO: {
+ mod = ((t) % 1000000000 + 1000000000) % 1000000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000000;
+ }
quot = t / 1000000000;
fractionLen = 11;
format = ".%09" PRId64;
- mod = t % 1000000000;
break;
}
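
Both JSON timestamp writers above are rewritten so that a negative epoch (a time before 1970-01-01) splits into a floor quotient and a non-negative fraction; C's / and % truncate toward zero, so the two values have to be adjusted together. A sketch of the idiom for the millisecond case, splitEpochMs being an illustrative name:

    #include <stdint.h>

    static void splitEpochMs(int64_t t, int64_t *sec, int64_t *frac) {
        int64_t mod = ((t % 1000) + 1000) % 1000;  /* always in [0, 999] */
        if (t < 0 && mod != 0) {
            t -= 1000;     /* step past the lower second boundary ... */
        }
        *sec  = t / 1000;  /* ... so truncating division equals floor */
        *frac = mod;
    }
    /* e.g. t = -1500 gives sec = -2, frac = 500: -2s + 0.500s = -1.5s */
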
diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c
index 02f21037b8592cc847f02f1b2fbe3c01acd508d8..7066f19769754e78dffeed6a40b672584c0310f1 100644
--- a/src/plugins/http/src/httpParser.c
+++ b/src/plugins/http/src/httpParser.c
@@ -663,7 +663,7 @@ static int32_t httpParserOnTarget(HttpParser *parser, HTTP_PARSER_STATE state, c
HttpContext *pContext = parser->pContext;
int32_t ok = 0;
do {
- if (!isspace(c) && c != '\r' && c != '\n') {
+ if (!isspace(c)) {
if (httpAppendString(&parser->str, &c, 1)) {
httpError("context:%p, fd:%d, parser state:%d, char:[%c]%02x, oom", pContext, pContext->fd, state, c, c);
ok = -1;
diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c
index 79e728dd456fb8a340e50f9d7e9cbd3c409614db..1d05b455cb5c66e4f492140e1f337210da04caef 100644
--- a/src/plugins/http/src/httpResp.c
+++ b/src/plugins/http/src/httpResp.c
@@ -147,6 +147,8 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) {
httpCode = pContext->parser->httpCode;
}
+ pContext->error = true;
+
char *httpCodeStr = httpGetStatusDesc(httpCode);
httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo & 0XFFFF, tstrerror(errNo));
}
diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c
index a285670d20ae1e5a9c8bd6c41971fc57cda3320a..24e4f90244cf52799fde1a45054875a4bf3d1850 100644
--- a/src/plugins/http/src/httpRestHandle.c
+++ b/src/plugins/http/src/httpRestHandle.c
@@ -19,6 +19,7 @@
#include "httpLog.h"
#include "httpRestHandle.h"
#include "httpRestJson.h"
+#include "tglobal.h"
static HttpDecodeMethod restDecodeMethod = {"rest", restProcessRequest};
static HttpDecodeMethod restDecodeMethod2 = {"restful", restProcessRequest};
@@ -62,11 +63,11 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser;
- if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) {
+ if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN);
return true;
}
@@ -107,6 +108,24 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
HttpSqlCmd* cmd = &(pContext->singleCmd);
cmd->nativSql = sql;
+ /* find if there is db_name in url */
+ pContext->db[0] = '\0';
+
+ HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS];
+ if (tsHttpDbNameMandatory) {
+ if (path->pos == 0) {
+ httpError("context:%p, fd:%d, user:%s, database name is mandatory", pContext, pContext->fd, pContext->user);
+ httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVALID_URL);
+ return false;
+ }
+ }
+
+ if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') &&
+ (sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' '))
+ {
+ snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str);
+ }
+
pContext->reqType = HTTP_REQTYPE_SINGLE_SQL;
if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) {
pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod;
diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c
index 47f2d4ff5bcc513aafb8ea8f4e2a85db5a35b12a..13596b0e8a4ea4d183cc4bf75917fd08a9dd7290 100644
--- a/src/plugins/http/src/httpRestJson.c
+++ b/src/plugins/http/src/httpRestJson.c
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tglobal.h"
+#include "tsclient.h"
#include "httpLog.h"
#include "httpJson.h"
#include "httpRestHandle.h"
@@ -62,13 +63,21 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
httpJsonItemToken(jsonBuf);
httpJsonToken(jsonBuf, JsonArrStt);
+ SSqlObj *pObj = (SSqlObj *) result;
+ bool isAlterSql = (pObj->sqlstr == NULL) ? false : httpCheckAlterSql(pObj->sqlstr);
+
if (num_fields == 0) {
httpJsonItemToken(jsonBuf);
httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN);
} else {
- for (int32_t i = 0; i < num_fields; ++i) {
+ if (isAlterSql == true) {
httpJsonItemToken(jsonBuf);
- httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
+ httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN);
+ } else {
+ for (int32_t i = 0; i < num_fields; ++i) {
+ httpJsonItemToken(jsonBuf);
+ httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
+ }
}
}
@@ -99,8 +108,14 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result)
httpJsonItemToken(jsonBuf);
httpJsonToken(jsonBuf, JsonArrStt);
- httpJsonItemToken(jsonBuf);
- httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
+ if (isAlterSql == true) {
+ httpJsonItemToken(jsonBuf);
+ httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN);
+ } else {
+ httpJsonItemToken(jsonBuf);
+ httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name));
+ }
+
httpJsonItemToken(jsonBuf);
httpJsonInt(jsonBuf, fields[i].type);
httpJsonItemToken(jsonBuf);
diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c
index f02859f165499b0c69b095599dd47890e644c604..13a0835c3960333c6d12aa443025de5fb95d565e 100644
--- a/src/plugins/http/src/httpServer.c
+++ b/src/plugins/http/src/httpServer.c
@@ -191,8 +191,6 @@ static void httpProcessHttpData(void *param) {
if (httpReadData(pContext)) {
(*(pThread->processData))(pContext);
atomic_fetch_add_32(&pServer->requestNum, 1);
- } else {
- httpReleaseContext(pContext/*, false*/);
}
}
}
@@ -402,13 +400,17 @@ static bool httpReadData(HttpContext *pContext) {
} else if (nread < 0) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
- return false; // later again
+ continue; // later again
} else {
httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
+ taosCloseSocket(pContext->fd);
+ httpReleaseContext(pContext/*, false */);
return false;
}
} else {
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
+ taosCloseSocket(pContext->fd);
+ httpReleaseContext(pContext/*, false */);
return false;
}
}
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index c2e723732a0f9d786994527c6cd1ac77f273a736..602767a6563b3ca3430501c0dbcee65333f1d44b 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -405,7 +405,6 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) {
if (pContext->session == NULL) {
httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL);
- httpCloseContextByApp(pContext);
} else {
httpExecCmd(pContext);
}
@@ -419,6 +418,11 @@ void httpProcessRequest(HttpContext *pContext) {
&(pContext->taos));
httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
pContext->taos);
+
+ if (pContext->taos != NULL) {
+ STscObj *pObj = pContext->taos;
+ pObj->from = TAOS_REQ_FROM_HTTP;
+ }
} else {
httpExecCmd(pContext);
}
diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c
index ade50bdad6bf6b0a7a2d43bb354851d90686be49..f30ac7326eef20f4abf5558b288f16f6ee313b42 100644
--- a/src/plugins/http/src/httpUtil.c
+++ b/src/plugins/http/src/httpUtil.c
@@ -21,6 +21,7 @@
#include "httpResp.h"
#include "httpSql.h"
#include "httpUtil.h"
+#include "ttoken.h"
bool httpCheckUsedbSql(char *sql) {
if (strstr(sql, "use ") != NULL) {
@@ -29,6 +30,17 @@ bool httpCheckUsedbSql(char *sql) {
return false;
}
+bool httpCheckAlterSql(char *sql) {
+ int32_t index = 0;
+
+ do {
+ SStrToken t0 = tStrGetToken(sql, &index, false);
+ if (t0.type != TK_LP) {
+ return t0.type == TK_ALTER;
+ }
+ } while (1);
+}
+
void httpTimeToString(int32_t t, char *buf, int32_t buflen) {
memset(buf, 0, (size_t)buflen);
char ts[32] = {0};
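httpCheckAlterSql() relies on the SQL tokenizer from ttoken.h rather than substring matching: it discards any leading '(' tokens and classifies the statement by the first real token. A hedged usage sketch (the statements are illustrative):

bool a = httpCheckAlterSql("alter table d1001 add column c2 int"); /* true: first token is TK_ALTER */
bool b = httpCheckAlterSql("(select * from d1001)");               /* false: TK_LP skipped, then TK_SELECT */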
diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c
index 6e583fe0dfd809bac8c0aabf56e48bb33bd910ce..fea793fa860fd17ff30bcecae1436180bc6b34bf 100644
--- a/src/plugins/monitor/src/monMain.c
+++ b/src/plugins/monitor/src/monMain.c
@@ -34,7 +34,7 @@
#define monTrace(...) { if (monDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monDebugFlag, __VA_ARGS__); }}
#define SQL_LENGTH 1030
-#define LOG_LEN_STR 100
+#define LOG_LEN_STR 512
#define IP_LEN_STR TSDB_EP_LEN
#define CHECK_INTERVAL 1000
diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h
index d4116fbfb2daec9b47c4a891c3c886728e6ca515..4f7821708c3e9b3c3d0eb975125e1ad12c5f82a4 100644
--- a/src/query/inc/qAggMain.h
+++ b/src/query/inc/qAggMain.h
@@ -70,14 +70,14 @@ extern "C" {
#define TSDB_FUNC_DERIVATIVE 32
#define TSDB_FUNC_BLKINFO 33
-
-#define TSDB_FUNC_HISTOGRAM 34
-#define TSDB_FUNC_HLL 35
-#define TSDB_FUNC_MODE 36
-#define TSDB_FUNC_SAMPLE 37
-#define TSDB_FUNC_CEIL 38
-#define TSDB_FUNC_FLOOR 39
-#define TSDB_FUNC_ROUND 40
+#define TSDB_FUNC_CEIL 34
+#define TSDB_FUNC_FLOOR 35
+#define TSDB_FUNC_ROUND 36
+
+#define TSDB_FUNC_HISTOGRAM 37
+#define TSDB_FUNC_HLL 38
+#define TSDB_FUNC_MODE 39
+#define TSDB_FUNC_SAMPLE 40
#define TSDB_FUNC_MAVG 41
#define TSDB_FUNC_CSUM 42
@@ -88,6 +88,7 @@ extern "C" {
#define TSDB_FUNCSTATE_OF 0x10u // outer forward
#define TSDB_FUNCSTATE_NEED_TS 0x20u // timestamp is required during query processing
#define TSDB_FUNCSTATE_SELECTIVITY 0x40u // selectivity functions, can exists along with tag columns
+#define TSDB_FUNCSTATE_SCALAR 0x80u
#define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF
#define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF
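TSDB_FUNCSTATE_SCALAR takes the next free bit in the same status bitmask as the other FUNCSTATE flags, so call sites can test it with the usual mask-and-compare. A minimal sketch of the test that the qExecutor.c hunks below perform:

/* functionId indexes the global aAggs[] table declared in this header */
bool isScalar = (aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0;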
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 56fab57e26227212ca6d2502fc7e035e2af258d5..82f4f34a57c7d6d10a021fb2e426ff83cb3604e6 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -43,6 +43,8 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows)
+#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData? 1 : 0)
+
enum {
// when query starts to execute, this status will set
QUERY_NOT_COMPLETED = 0x1u,
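NEEDTO_COMPRESS_QUERY() compares the result payload size against the global tsCompressColData threshold; together with the new compressed/compLen parameters of doDumpQueryResult() further down this header, it decides whether column data is compressed before transport. A hedged sketch (data is a caller-provided output buffer):

int64_t numOfRows = 0;
int32_t size = (int32_t)getResultSize(pQInfo, &numOfRows);
int8_t  compressed = NEEDTO_COMPRESS_QUERY(size);   /* 1 => compress column data */
int32_t compLen = 0;
doDumpQueryResult(pQInfo, data, compressed, &compLen);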
@@ -84,11 +86,18 @@ typedef struct SResultRow {
char *key; // start key of current result row
} SResultRow;
+typedef struct SResultRowCell {
+ uint64_t groupId;
+ SResultRow *pRow;
+} SResultRowCell;
+
typedef struct SGroupResInfo {
int32_t totalGroup;
int32_t currentGroup;
int32_t index;
SArray* pRows; // SArray
+ bool ordered;
+ int32_t position;
} SGroupResInfo;
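Each freshly created result row is also recorded flat in the new pResultRowArrayList, pairing the owning group with the row pointer; the doSetResultOutBufByKey() hunk in qExecutor.c below does exactly this:

SResultRowCell cell = {.groupId = tableGroupId, .pRow = pResult};
taosArrayPush(pRuntimeEnv->pResultRowArrayList, &cell);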
/**
@@ -219,6 +228,7 @@ typedef struct SQueryAttr {
bool distinct; // distinct query or not
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
+  bool multigroupResult; // results of multiple groups may coexist in one SSDataBlock
  int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number
@@ -254,7 +264,7 @@ typedef struct SQueryAttr {
SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query.
SSingleColumnFilterInfo* pFilterInfo;
- SFilterInfo *pFilters;
+ void *pFilters;
void* tsdb;
SMemRef memRef;
@@ -281,8 +291,9 @@ typedef struct SQueryRuntimeEnv {
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SHashObj* pResultRowHashTable; // quick locate the window object for each result
SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not
+ SArray* pResultRowArrayList; // The array list that contains the Result rows
char* keyBuf; // window key buffer
- SResultRowPool* pool; // window result object pool
+  SResultRowPool* pool; // window result object pool; all SResultRow objects are allocated and managed here
char** prevRow;
SArray* prevResult; // intermediate result, SArray
@@ -335,6 +346,7 @@ enum OPERATOR_TYPE_E {
OP_StateWindow = 22,
OP_AllTimeWindow = 23,
OP_AllMultiTableTimeInterval = 24,
+ OP_Order = 25,
};
typedef struct SOperatorInfo {
@@ -387,7 +399,6 @@ typedef struct SQueryParam {
char *sql;
char *tagCond;
char *colCond;
- char *tbnameCond;
char *prevResult;
SArray *pTableIdList;
SSqlExpr **pExpr;
@@ -395,7 +406,7 @@ typedef struct SQueryParam {
SExprInfo *pExprs;
SExprInfo *pSecExprs;
- SFilterInfo *pFilters;
+ void *pFilters;
SColIndex *pGroupColIndex;
SColumnInfo *pTagColumnInfo;
@@ -405,6 +416,11 @@ typedef struct SQueryParam {
SUdfInfo *pUdfInfo;
} SQueryParam;
+typedef struct SColumnDataParam{
+ int32_t numOfCols;
+ SArray* pDataBlock;
+} SColumnDataParam;
+
typedef struct STableScanInfo {
void *pQueryHandle;
int32_t numOfBlocks;
@@ -422,7 +438,6 @@ typedef struct STableScanInfo {
int32_t *rowCellInfoOffset;
SExprInfo *pExpr;
SSDataBlock block;
- bool loadExternalRows; // load external rows (prev & next rows)
int32_t numOfOutput;
int64_t elapsedTime;
@@ -465,16 +480,23 @@ typedef struct SLimitOperatorInfo {
} SLimitOperatorInfo;
typedef struct SSLimitOperatorInfo {
- int64_t groupTotal;
- int64_t currentGroupOffset;
-
- int64_t rowsTotal;
- int64_t currentOffset;
- SLimitVal limit;
- SLimitVal slimit;
-
- char **prevRow;
- SArray *orderColumnList;
+ int64_t groupTotal;
+ int64_t currentGroupOffset;
+
+ int64_t rowsTotal;
+ int64_t currentOffset;
+ SLimitVal limit;
+ SLimitVal slimit;
+
+ char **prevRow;
+ SArray *orderColumnList;
+ bool hasPrev;
+ bool ignoreCurrentGroup;
+ bool multigroupResult;
+ SSDataBlock *pRes; // result buffer
+ SSDataBlock *pPrevBlock;
+ int64_t capacity;
+ int64_t threshold;
} SSLimitOperatorInfo;
typedef struct SFilterOperatorInfo {
@@ -486,8 +508,9 @@ typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;
int64_t totalInputRows;
-
+ void **p;
SSDataBlock *existNewGroupBlock;
+ bool multigroupResult;
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
@@ -513,7 +536,13 @@ typedef struct SStateWindowOperatorInfo {
int32_t start;
char* prevData; // previous data
bool reptScan;
-} SStateWindowOperatorInfo ;
+} SStateWindowOperatorInfo;
+
+typedef struct SDistinctDataInfo {
+ int32_t index;
+ int32_t type;
+ int32_t bytes;
+} SDistinctDataInfo;
typedef struct SDistinctOperatorInfo {
SHashObj *pSet;
@@ -521,7 +550,9 @@ typedef struct SDistinctOperatorInfo {
  bool recordNullVal; // the null value has already been recorded, no need to try again
int64_t threshold;
int64_t outputCapacity;
- int32_t colIndex;
+ int32_t totalBytes;
+ char* buf;
+ SArray* pDistinctDataInfo;
} SDistinctOperatorInfo;
struct SGlobalMerger;
@@ -541,11 +572,18 @@ typedef struct SMultiwayMergeInfo {
bool hasDataBlockForNewGroup;
SSDataBlock *pExistBlock;
- bool hasPrev;
- bool groupMix;
SArray *udfInfo;
+ bool hasPrev;
+ bool multiGroupResults;
} SMultiwayMergeInfo;
+// todo support the disk-based sort
+typedef struct SOrderOperatorInfo {
+ int32_t colIndex;
+ int32_t order;
+ SSDataBlock *pDataBlock;
+} SOrderOperatorInfo;
+
void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream);
SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime);
@@ -558,7 +596,7 @@ SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
-SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
@@ -567,14 +605,15 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
- int32_t numOfRows, void* merger, bool groupMix);
-SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
+ int32_t numOfRows, void* merger);
+SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp);
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
-SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
+SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger, bool multigroupResult);
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput);
+SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal);
SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);
@@ -582,11 +621,11 @@ SSDataBlock* doSLimit(void* param, bool* newgroup);
int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo, uint64_t qId);
void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock);
-void doSetFilterColInfo(SFilterInfo *pFilters, SSDataBlock* pBlock);
bool doFilterDataBlock(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t numOfRows, int8_t* p);
void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows);
+
void* destroyOutputBuf(SSDataBlock* pBlock);
void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
@@ -595,6 +634,7 @@ int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int3
void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset);
void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows);
void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity);
+void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput);
void freeParam(SQueryParam *param);
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);
@@ -604,11 +644,11 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo,
SSqlExpr **pExpr, SExprInfo *prevExpr, SUdfInfo *pUdfInfo);
-int32_t createQueryFilter(char *data, uint16_t len, SFilterInfo** pFilters);
+int32_t createQueryFilter(char *data, uint16_t len, void** pFilters);
SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
- SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, SFilterInfo* pFilters, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo);
+ SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, void* pFilters, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo);
int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start,
int32_t prevResultLen, void* merger);
@@ -623,6 +663,7 @@ int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);
bool isQueryKilled(SQInfo *pQInfo);
int32_t checkForQueryBuf(size_t numOfTables);
+bool checkNeedToCompressQueryCol(SQInfo *pQInfo);
bool doBuildResCheck(SQInfo* pQInfo);
void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status);
@@ -631,7 +672,7 @@ void destroyUdfInfo(SUdfInfo* pUdfInfo);
bool isValidQInfo(void *param);
-int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
+int32_t doDumpQueryResult(SQInfo *pQInfo, char *data, int8_t compressed, int32_t *compLen);
size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
void setQueryKilled(SQInfo *pQInfo);
@@ -647,5 +688,6 @@ void freeQueryAttr(SQueryAttr *pQuery);
int32_t getMaximumIdleDurationSec();
void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t type);
+int32_t getColumnDataFromId(void *param, int32_t id, void **data);
#endif // TDENGINE_QEXECUTOR_H
diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h
index cf0e8ce31af91ad9d94feed2045265862a57545a..d4a9ed0cbcef0a52085dcd60569270037fb57908 100644
--- a/src/query/inc/qExtbuffer.h
+++ b/src/query/inc/qExtbuffer.h
@@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
void tOrderDescDestroy(tOrderDescriptor *pDesc);
+void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
+
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
int32_t numOfRowsToWrite, int32_t srcCapacity);
@@ -227,6 +229,8 @@ typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t
void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType);
int32_t compare_sa(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data);
int32_t compare_sd(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data);
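taoscQSort() sorts all columns in lock-step, ordered by the column at `index`, which is what the new in-memory order operator needs. A hedged usage sketch, assuming pCols[i] points at the raw data of column i, pSchema[i] describes its type and width, and compareInt32Val is the comparator from tcompare.h:

/* sort every row of the block by the values in column `orderCol` */
taoscQSort(pCols, pSchema, numOfCols, numOfRows, orderCol, compareInt32Val);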
diff --git a/src/query/inc/qFilter.h b/src/query/inc/qFilter.h
index 7a7b3157eaeaf7075b1d856b4e68568f61871885..c34a56cc1cd6e135947eee897f87d060880f15c7 100644
--- a/src/query/inc/qFilter.h
+++ b/src/query/inc/qFilter.h
@@ -31,10 +31,11 @@ extern "C" {
#define FILTER_DEFAULT_GROUP_UNIT_SIZE 2
#define FILTER_DUMMY_EMPTY_OPTR 127
-#define FILTER_DUMMY_RANGE_OPTR 126
#define MAX_NUM_STR_SIZE 40
+#define FILTER_RM_UNIT_MIN_ROWS 100
+
enum {
FLD_TYPE_COLUMN = 1,
FLD_TYPE_VALUE = 2,
@@ -70,12 +71,24 @@ enum {
FI_STATUS_CLONED = 8,
};
+enum {
+ FI_STATUS_BLK_ALL = 1,
+ FI_STATUS_BLK_EMPTY = 2,
+ FI_STATUS_BLK_ACTIVE = 4,
+};
+
enum {
RANGE_TYPE_UNIT = 1,
RANGE_TYPE_VAR_HASH = 2,
RANGE_TYPE_MR_CTX = 3,
};
+enum {
+ FI_ACTION_NO_NEED = 1,
+ FI_ACTION_CONTINUE,
+ FI_ACTION_STOP,
+};
+
typedef struct OptrStr {
uint16_t optr;
char *str;
@@ -98,7 +111,8 @@ typedef struct SFilterColRange {
typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const void *, __compar_fn_t);
typedef int32_t(*filter_desc_compare_func)(const void *, const void *);
-typedef bool(*filter_exec_func)(void *, int32_t, int8_t*);
+typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SDataStatis *, int16_t);
+typedef int32_t (*filer_get_col_from_id)(void *, int32_t, void **);
typedef struct SFilterRangeCompare {
int64_t s;
@@ -197,6 +211,7 @@ typedef struct SFilterComUnit {
void *colData;
void *valData;
void *valData2;
+ uint16_t colId;
uint16_t dataSize;
uint8_t dataType;
uint8_t optr;
@@ -224,7 +239,11 @@ typedef struct SFilterInfo {
uint8_t *unitRes; // result
uint8_t *unitFlags; // got result
SFilterRangeCtx **colRange;
- filter_exec_func func;
+ filter_exec_func func;
+ uint8_t blkFlag;
+ uint16_t blkGroupNum;
+ uint16_t *blkUnits;
+ int8_t *blkUnitRes;
SFilterPCtx pctx;
} SFilterInfo;
@@ -265,12 +284,13 @@ typedef struct SFilterInfo {
#define CHK_RET(c, r) do { if (c) { return r; } } while (0)
#define CHK_JMP(c) do { if (c) { goto _return; } } while (0)
#define CHK_LRETV(c,...) do { if (c) { qError(__VA_ARGS__); return; } } while (0)
-#define CHK_LRET(c, r,...) do { if (c) { qError(__VA_ARGS__); return r; } } while (0)
+#define CHK_LRET(c, r,...) do { if (c) { if (r) {qError(__VA_ARGS__); } else { qDebug(__VA_ARGS__); } return r; } } while (0)
#define FILTER_GET_FIELD(i, id) (&((i)->fields[(id).type].fields[(id).idx]))
#define FILTER_GET_COL_FIELD(i, idx) (&((i)->fields[FLD_TYPE_COLUMN].fields[idx]))
#define FILTER_GET_COL_FIELD_TYPE(fi) (((SSchema *)((fi)->desc))->type)
#define FILTER_GET_COL_FIELD_SIZE(fi) (((SSchema *)((fi)->desc))->bytes)
+#define FILTER_GET_COL_FIELD_ID(fi) (((SSchema *)((fi)->desc))->colId)
#define FILTER_GET_COL_FIELD_DESC(fi) ((SSchema *)((fi)->desc))
#define FILTER_GET_COL_FIELD_DATA(fi, ri) ((char *)(fi)->data + ((SSchema *)((fi)->desc))->bytes * (ri))
#define FILTER_GET_VAL_FIELD_TYPE(fi) (((tVariant *)((fi)->desc))->nType)
@@ -280,10 +300,12 @@ typedef struct SFilterInfo {
#define FILTER_GROUP_UNIT(i, g, uid) ((i)->units + (g)->unitIdxs[uid])
#define FILTER_UNIT_LEFT_FIELD(i, u) FILTER_GET_FIELD(i, (u)->left)
#define FILTER_UNIT_RIGHT_FIELD(i, u) FILTER_GET_FIELD(i, (u)->right)
+#define FILTER_UNIT_RIGHT2_FIELD(i, u) FILTER_GET_FIELD(i, (u)->right2)
#define FILTER_UNIT_DATA_TYPE(u) ((u)->compare.type)
#define FILTER_UNIT_COL_DESC(i, u) FILTER_GET_COL_FIELD_DESC(FILTER_UNIT_LEFT_FIELD(i, u))
#define FILTER_UNIT_COL_DATA(i, u, ri) FILTER_GET_COL_FIELD_DATA(FILTER_UNIT_LEFT_FIELD(i, u), ri)
#define FILTER_UNIT_COL_SIZE(i, u) FILTER_GET_COL_FIELD_SIZE(FILTER_UNIT_LEFT_FIELD(i, u))
+#define FILTER_UNIT_COL_ID(i, u) FILTER_GET_COL_FIELD_ID(FILTER_UNIT_LEFT_FIELD(i, u))
#define FILTER_UNIT_VAL_DATA(i, u) FILTER_GET_VAL_FIELD_DATA(FILTER_UNIT_RIGHT_FIELD(i, u))
#define FILTER_UNIT_COL_IDX(u) ((u)->left.idx)
#define FILTER_UNIT_OPTR(u) ((u)->compare.optr)
@@ -308,14 +330,16 @@ typedef struct SFilterInfo {
#define FILTER_EMPTY_RES(i) FILTER_GET_FLAG((i)->status, FI_STATUS_EMPTY)
-extern int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t options);
-extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t* p);
-extern int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data);
+extern int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options);
+extern bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols);
+extern int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from_id fp);
extern int32_t filterGetTimeRange(SFilterInfo *info, STimeWindow *win);
extern int32_t filterConverNcharColumns(SFilterInfo* pFilterInfo, int32_t rows, bool *gotNchar);
extern int32_t filterFreeNcharColumns(SFilterInfo* pFilterInfo);
extern void filterFreeInfo(SFilterInfo *info);
extern bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t numOfCols, int32_t numOfRows);
+extern int32_t filterIsIndexedColumnQuery(SFilterInfo* info, int32_t idxId, bool *res);
+extern int32_t filterGetIndexedColumnInfo(SFilterInfo* info, char** val, int32_t *order, int32_t *flag);
#ifdef __cplusplus
}
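filterSetColFieldData() now takes an opaque param plus a filer_get_col_from_id resolver instead of one raw column pointer per colId, decoupling the filter from the caller's data layout. A hedged sketch of a resolver over an SSDataBlock (getColumnDataFromId in the qExecutor.c hunks below is the real one):

static int32_t getColById(void *param, int32_t colId, void **data) {
  SSDataBlock *pBlock = param;
  for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
    SColumnInfoData *pCol = taosArrayGet(pBlock->pDataBlock, i);
    if (pCol->info.colId == colId) {
      *data = pCol->pData;   /* hand the raw column data to the filter */
      break;
    }
  }
  return TSDB_CODE_SUCCESS;
}

/* bind the block's columns once, then run filterExecute() over it */
filterSetColFieldData(pFilters, pBlock, getColById);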
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index 531ff06565dba837c696c6069d409ccf536cbe8c..c231c90996e00d84a70c7141eac69c5a59e20254 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -80,6 +80,7 @@ typedef struct tVariantListItem {
} tVariantListItem;
typedef struct SIntervalVal {
+ int32_t token;
SStrToken interval;
SStrToken offset;
} SIntervalVal;
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index d6b04b033062bd17e451c0f9af4d2bf8f84190a1..948a1ae91e01331c4f566ac5089485f717fc5632 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -38,12 +38,6 @@ typedef struct SJoinInfo {
} SJoinInfo;
typedef struct STagCond {
- // relation between tbname list and query condition, including : TK_AND or TK_OR
- int16_t relType;
-
- // tbname query condition, only support tbname query condition on one table
- SCond tbnameCond;
-
// join condition, only support two tables join currently
SJoinInfo joinInfo;
@@ -93,6 +87,7 @@ typedef struct STableMetaInfo {
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray, involved tag columns
+ int32_t joinTagNum;
} STableMetaInfo;
struct SQInfo; // global merge operator
@@ -100,7 +95,7 @@ struct SQueryAttr; // query object
typedef struct STableFilter {
uint64_t uid;
- SFilterInfo info;
+ void *info;
} STableFilter;
typedef struct SQueryInfo {
@@ -165,6 +160,7 @@ typedef struct SQueryInfo {
bool orderProjectQuery;
bool stateWindow;
bool globalMerge;
+ bool multigroupResult;
} SQueryInfo;
/**
diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h
index 5c48c43c45fdb5cffdcc01901f1ede4a3b98d240..87a221943a566cae3f873d5859a1a5cc09cf9989 100644
--- a/src/query/inc/queryLog.h
+++ b/src/query/inc/queryLog.h
@@ -24,10 +24,10 @@ extern "C" {
extern uint32_t qDebugFlag;
-#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0)
-#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0)
-#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0)
-#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0)
+#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0)
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index 8b43e55693c53fffb19e73b5b9ef7ccada4383a2..336e8620f210351471bddb9c94d56fcaa7f8a0fc 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -11,7 +11,7 @@
%left OR.
%left AND.
%right NOT.
-%left EQ NE ISNULL NOTNULL IS LIKE GLOB BETWEEN IN.
+%left EQ NE ISNULL NOTNULL IS LIKE MATCH NMATCH GLOB BETWEEN IN.
%left GT GE LT LE.
%left BITAND BITOR LSHIFT RSHIFT.
%left PLUS MINUS.
@@ -162,7 +162,10 @@ cmd ::= DESCRIBE ids(X) cpxName(Y). {
X.n += Y.n;
setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X);
}
-
+cmd ::= DESC ids(X) cpxName(Y). {
+ X.n += Y.n;
+ setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X);
+}
/////////////////////////////////THE ALTER STATEMENT////////////////////////////////////////
cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); }
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);}
@@ -479,7 +482,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
-select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F)groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
+select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_option(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
}
@@ -493,7 +496,7 @@ union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); }
cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// Support for the SQL expression without from & where subclauses, e.g.,
-// select current_database()
+// select database()
// select server_version()
// select client_version()
// select server_state()
@@ -569,10 +572,14 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
%type tmvar {SStrToken}
tmvar(A) ::= VARIABLE(X). {A = X;}
-%type interval_opt {SIntervalVal}
-interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0;}
-interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X;}
-interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
+%type interval_option {SIntervalVal}
+interval_option(N) ::= intervalKey(A) LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.token = A;}
+interval_option(N) ::= intervalKey(A) LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X; N.token = A;}
+interval_option(N) ::= . {memset(&N, 0, sizeof(N));}
+
+%type intervalKey {int32_t}
+intervalKey(A) ::= INTERVAL. {A = TK_INTERVAL;}
+intervalKey(A) ::= EVERY. {A = TK_EVERY; }
%type session_option {SSessionWindowVal}
session_option(X) ::= . {X.col.n = 0; X.gap.n = 0;}
@@ -581,6 +588,7 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
X.col = V;
X.gap = Y;
}
+
%type windowstate_option {SWindowStateVal}
windowstate_option(X) ::= . { X.col.n = 0; X.col.z = NULL;}
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. { X.col = V; }
@@ -743,6 +751,10 @@ expr(A) ::= expr(X) REM expr(Y). {A = tSqlExprCreate(X, Y, TK_REM); }
// like expression
expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); }
+// match expression
+expr(A) ::= expr(X) MATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_MATCH); }
+expr(A) ::= expr(X) NMATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_NMATCH); }
+
// in expression
expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSqlExprCreate(X, (tSqlExpr*)Y, TK_IN); }
@@ -908,5 +920,5 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
- LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
+ LIKE MATCH NMATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES.
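The grammar additions surface to clients as new SQL forms: DESC as a shorthand for DESCRIBE, EVERY(...) accepted alongside INTERVAL(...), and the MATCH/NMATCH regex operators. A hedged client-side sketch via the C connector, assuming `taos` is an open connection and the table names are illustrative:

TAOS_RES *r1 = taos_query(taos, "desc d1001");
TAOS_RES *r2 = taos_query(taos, "select interp(current) from d1001 every(1s)");
TAOS_RES *r3 = taos_query(taos, "select * from meters where location match '^Beijing'");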
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index c19628eb370a5db7d6e5940531b9954879e213ee..1fd682aebd6ac7899ca0a88f6a4744cd4ebbb006 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -179,7 +179,9 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY ||
functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ ||
- functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) {
+ functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_CEIL ||
+ functionId == TSDB_FUNC_FLOOR || functionId == TSDB_FUNC_ROUND)
+ {
*type = (int16_t)dataType;
*bytes = (int16_t)dataBytes;
@@ -405,7 +407,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
// TODO use hash table
int32_t isValidFunction(const char* name, int32_t len) {
- for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) {
+ for(int32_t i = 0; i <= TSDB_FUNC_ROUND; ++i) {
int32_t nameLen = (int32_t) strlen(aAggs[i].name);
if (len != nameLen) {
continue;
@@ -1214,6 +1216,31 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp
DUPATE_DATA_WITHOUT_TS(pCtx, *(int64_t *)output, v, notNullElems, isMin);
break;
}
+
+ case TSDB_DATA_TYPE_UTINYINT: {
+ uint8_t v = GET_UINT8_VAL(input);
+ DUPATE_DATA_WITHOUT_TS(pCtx, *(uint8_t *)output, v, notNullElems, isMin);
+ break;
+ }
+
+ case TSDB_DATA_TYPE_USMALLINT: {
+ uint16_t v = GET_UINT16_VAL(input);
+ DUPATE_DATA_WITHOUT_TS(pCtx, *(uint16_t *)output, v, notNullElems, isMin);
+ break;
+ }
+
+ case TSDB_DATA_TYPE_UINT: {
+ uint32_t v = GET_UINT32_VAL(input);
+ DUPATE_DATA_WITHOUT_TS(pCtx, *(uint32_t *)output, v, notNullElems, isMin);
+ break;
+ }
+
+ case TSDB_DATA_TYPE_UBIGINT: {
+ uint64_t v = GET_UINT64_VAL(input);
+ DUPATE_DATA_WITHOUT_TS(pCtx, *(uint64_t *)output, v, notNullElems, isMin);
+ break;
+ }
+
default:
break;
}
@@ -3670,6 +3697,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
return;
}
+ bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
+
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
} else if (type == TSDB_FILL_NULL) {
@@ -3677,7 +3706,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
} else if (type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
} else {
- if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
+ if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
if (type == TSDB_FILL_PREV) {
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
@@ -3716,13 +3745,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY skey = GET_TS_DATA(pCtx, 0);
if (type == TSDB_FILL_PREV) {
- if (skey > pCtx->startTs) {
+ if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
return;
}
if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
- if (ekey > skey && ekey <= pCtx->startTs) {
+ if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
+ ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
skey = ekey;
}
}
@@ -3731,10 +3761,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = skey;
char* val = NULL;
- if (ekey < pCtx->startTs) {
+ if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1);
- if (ekey < pCtx->startTs) {
+ if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
return;
}
@@ -3755,12 +3785,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
- if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
+ if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
+ || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
return;
}
- assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
-
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
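The interpolation fix above makes the bracketing test order-aware: for an ascending scan startTs must lie within [skey, ekey], while a descending scan flips the bounds. The repeated predicate, extracted as a hedged helper:

/* true when ts is bracketed by the start/end keys in scan order */
static bool bracketsTs(TSKEY s, TSKEY e, TSKEY ts, bool ascQuery) {
  return ascQuery ? (s <= ts && e >= ts) : (s >= ts && e <= ts);
}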
@@ -3788,11 +3817,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly
if (pCtx->size > 0) {
- // impose the timestamp check
- TSKEY key = GET_TS_DATA(pCtx, 0);
+ bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
+ TSKEY key;
+ char *pData;
+ int32_t typedData = 0;
+
+ if (ascQuery) {
+ key = GET_TS_DATA(pCtx, 0);
+ pData = GET_INPUT_DATA(pCtx, 0);
+ } else {
+ key = pCtx->start.key;
+ if (key == INT64_MIN) {
+ key = GET_TS_DATA(pCtx, 0);
+ pData = GET_INPUT_DATA(pCtx, 0);
+ } else {
+ if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
+ pData = pCtx->start.ptr;
+ } else {
+ typedData = 1;
+ pData = (char *)&pCtx->start.val;
+ }
+ }
+ }
+
+ //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
if (key == pCtx->startTs) {
- char *pData = GET_INPUT_DATA(pCtx, 0);
- assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
+ if (typedData) {
+ SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
+ } else {
+ assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
+ }
+
SET_VAL(pCtx, 1, 1);
} else {
interp_function_impl(pCtx);
@@ -4061,7 +4116,7 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
} else {
pDist->maxRows = pSrc->maxRows;
pDist->minRows = pSrc->minRows;
-
+
int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS;
if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) {
++maxSteps;
@@ -4195,7 +4250,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
taosArrayDestroy(pDist->dataBlockInfos);
pDist->dataBlockInfos = NULL;
}
-
+
// cannot set the numOfIteratedElems again since it is set during previous iteration
pResInfo->numOfRes = 1;
pResInfo->hasResult = DATA_SET_FLAG;
@@ -4203,6 +4258,231 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
doFinalizer(pCtx);
}
+#define CFR_SET_VAL(type, data, pCtx, func, i, step, notNullElems) \
+ do { \
+ type *pData = (type *) data; \
+ type *pOutput = (type *) pCtx->pOutput; \
+ \
+ for (; i < pCtx->size && i >= 0; i += step) { \
+ if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \
+ continue; \
+ } \
+ \
+ *pOutput++ = (type) func((double) pData[i]); \
+ \
+ notNullElems++; \
+ } \
+ } while (0)
+
+#define CFR_SET_VAL_DOUBLE(data, pCtx, func, i, step, notNullElems) \
+ do { \
+ double *pData = (double *) data; \
+ double *pOutput = (double *) pCtx->pOutput; \
+ \
+ for (; i < pCtx->size && i >= 0; i += step) { \
+ if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \
+ continue; \
+ } \
+ \
+ SET_DOUBLE_VAL(pOutput, func(pData[i])); \
+ pOutput++; \
+ \
+ notNullElems++; \
+ } \
+ } while (0)
+
+static void ceil_function(SQLFunctionCtx *pCtx) {
+ void *data = GET_INPUT_DATA_LIST(pCtx);
+
+ int32_t notNullElems = 0;
+
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
+ int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
+
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_INT: {
+ CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_UINT: {
+ CFR_SET_VAL(uint32_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_BIGINT: {
+ CFR_SET_VAL(int64_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ CFR_SET_VAL(uint64_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ CFR_SET_VAL_DOUBLE(data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ CFR_SET_VAL(float, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ CFR_SET_VAL(int16_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ CFR_SET_VAL(uint16_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_TINYINT: {
+ CFR_SET_VAL(int8_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ CFR_SET_VAL(uint8_t, data, pCtx, ceil, i, step, notNullElems);
+ break;
+ }
+ default:
+      qError("invalid input type: %d", pCtx->inputType);
+ }
+
+ if (notNullElems <= 0) {
+ /*
+     * the whole block may consist of null values
+ */
+ assert(pCtx->hasNull);
+ } else {
+ GET_RES_INFO(pCtx)->numOfRes += notNullElems;
+ }
+}
+
+static void floor_function(SQLFunctionCtx *pCtx) {
+ void *data = GET_INPUT_DATA_LIST(pCtx);
+
+ int32_t notNullElems = 0;
+
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
+ int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
+
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_INT: {
+ CFR_SET_VAL(int32_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_UINT: {
+ CFR_SET_VAL(uint32_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_BIGINT: {
+ CFR_SET_VAL(int64_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ CFR_SET_VAL(uint64_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ CFR_SET_VAL_DOUBLE(data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ CFR_SET_VAL(float, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ CFR_SET_VAL(int16_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ CFR_SET_VAL(uint16_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_TINYINT: {
+ CFR_SET_VAL(int8_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ CFR_SET_VAL(uint8_t, data, pCtx, floor, i, step, notNullElems);
+ break;
+ }
+ default:
+      qError("invalid input type: %d", pCtx->inputType);
+ }
+
+ if (notNullElems <= 0) {
+ /*
+     * the whole block may consist of null values
+ */
+ assert(pCtx->hasNull);
+ } else {
+ GET_RES_INFO(pCtx)->numOfRes += notNullElems;
+ }
+}
+
+static void round_function(SQLFunctionCtx *pCtx) {
+ void *data = GET_INPUT_DATA_LIST(pCtx);
+
+ int32_t notNullElems = 0;
+
+ int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
+ int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
+
+ switch (pCtx->inputType) {
+ case TSDB_DATA_TYPE_INT: {
+ CFR_SET_VAL(int32_t, data, pCtx, round, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_UINT: {
+ CFR_SET_VAL(uint32_t, data, pCtx, round, i, step, notNullElems);
+ break;
+    }
+ case TSDB_DATA_TYPE_BIGINT: {
+ CFR_SET_VAL(int64_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ CFR_SET_VAL(uint64_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ CFR_SET_VAL_DOUBLE(data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ CFR_SET_VAL(float, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ CFR_SET_VAL(int16_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ CFR_SET_VAL(uint16_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_TINYINT: {
+ CFR_SET_VAL(int8_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ CFR_SET_VAL(uint8_t, data, pCtx, round, i, step, notNullElems);
+ break;
+ }
+ default:
+      qError("invalid input type: %d", pCtx->inputType);
+ }
+
+ if (notNullElems <= 0) {
+ /*
+     * the whole block may consist of null values
+ */
+ assert(pCtx->hasNull);
+ } else {
+ GET_RES_INFO(pCtx)->numOfRes += notNullElems;
+ }
+}
+
+#undef CFR_SET_VAL
+#undef CFR_SET_VAL_DOUBLE
+
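ceil_function(), floor_function() and round_function() differ only in the libm routine handed to CFR_SET_VAL / CFR_SET_VAL_DOUBLE, so the ten-type switch is written once as a macro instead of three times by hand. For reference, CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step, notNullElems) expands to roughly:

int32_t *pData = (int32_t *)data;
int32_t *pOutput = (int32_t *)pCtx->pOutput;
for (; i < pCtx->size && i >= 0; i += step) {
  if (pCtx->hasNull && isNull((const char *)&pData[i], pCtx->inputType)) {
    continue;                          /* NULLs are skipped, output stays dense */
  }
  *pOutput++ = (int32_t)ceil((double)pData[i]);
  notNullElems++;
}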
/////////////////////////////////////////////////////////////////////////////////////////////
/*
* function compatible list.
@@ -4221,8 +4501,8 @@ int32_t functionCompatList[] = {
4, -1, -1, 1, 1, 1, 1, 1, 1, -1,
// tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate
1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
- // tid_tag, derivative, blk_info
- 6, 8, 7,
+  // tid_tag, derivative, blk_info, ceil, floor, round
+ 6, 8, 7, 1, 1, 1
};
SAggFunctionInfo aAggs[] = {{
@@ -4625,7 +4905,7 @@ SAggFunctionInfo aAggs[] = {{
dataBlockRequired,
},
{
- // 33
+ // 33
"_block_dist", // return table id and the corresponding tags for join match and subscribe
TSDB_FUNC_BLKINFO,
TSDB_FUNC_BLKINFO,
@@ -4635,4 +4915,40 @@ SAggFunctionInfo aAggs[] = {{
blockinfo_func_finalizer,
block_func_merge,
dataBlockRequired,
+ },
+ {
+ // 34
+ "ceil",
+ TSDB_FUNC_CEIL,
+ TSDB_FUNC_CEIL,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ ceil_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
+ },
+ {
+ // 35
+ "floor",
+ TSDB_FUNC_FLOOR,
+ TSDB_FUNC_FLOOR,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ floor_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
+ },
+ {
+ // 36
+ "round",
+ TSDB_FUNC_ROUND,
+ TSDB_FUNC_ROUND,
+ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SCALAR,
+ function_setup,
+ round_function,
+ doFinalizer,
+ noop1,
+ dataBlockRequired
}};
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 9000bcdf77369c79db28adcccbf1f07bd7f46a10..982996d70d6e8c05c45425e737b57a08daf331c9 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -16,7 +16,6 @@
#include "qFill.h"
#include "taosmsg.h"
#include "tglobal.h"
-#include "talgo.h"
#include "exception.h"
#include "hash.h"
@@ -39,11 +38,12 @@
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
-
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0}
+#define MULTI_KEY_DELIM "-"
+
#define TIME_WINDOW_COPY(_dst, _src) do {\
(_dst).skey = (_src).skey;\
(_dst).ekey = (_src).ekey;\
@@ -224,11 +224,18 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput);
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput);
static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
static void destroyOperatorInfo(SOperatorInfo* pOperator);
+static void doSetOperatorCompleted(SOperatorInfo* pOperator) {
+ pOperator->status = OP_EXEC_DONE;
+ if (pOperator->pRuntimeEnv != NULL) {
+ setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+ }
+}
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock);
@@ -242,8 +249,7 @@ static void setCtxTagForJoin(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
static void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr);
static void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes);
static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo,
- SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput,
- int32_t groupIndex);
+ SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t tableGroupId);
SArray* getOrderCheckColumns(SQueryAttr* pQuery);
@@ -399,6 +405,25 @@ static bool isSelectivityWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput
return (numOfSelectivity > 0 && hasTags);
}
+static bool isScalarWithTagsQuery(SQLFunctionCtx *pCtx, int32_t numOfOutput) {
+ bool hasTags = false;
+ int32_t numOfScalar = 0;
+
+ for (int32_t i = 0; i < numOfOutput; ++i) {
+ int32_t functId = pCtx[i].functionId;
+ if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) {
+ hasTags = true;
+ continue;
+ }
+
+ if ((aAggs[functId].status & TSDB_FUNCSTATE_SCALAR) != 0) {
+ numOfScalar++;
+ }
+ }
+
+ return (numOfScalar > 0 && hasTags);
+}
+
static bool isProjQuery(SQueryAttr *pQueryAttr) {
for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
int32_t functId = pQueryAttr->pExpr1[i].base.functionId;
@@ -538,6 +563,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
// add a new result set for a new group
taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pResult, POINTER_BYTES);
+ SResultRowCell cell = {.groupId = tableGroupId, .pRow = pResult};
+ taosArrayPush(pRuntimeEnv->pResultRowArrayList, &cell);
} else {
pResult = *p1;
}
@@ -963,8 +990,6 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t
break;
}
}
-
- return;
}
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
@@ -1326,6 +1351,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = curTs;
pCtx[k].end.val = v2;
+
+ if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
+ if (prevRowIndex == -1) {
+ pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
+ } else {
+ pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
+ }
+
+ pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
+ }
}
} else if (functionId == TSDB_FUNC_TWA) {
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
@@ -1595,6 +1630,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL;
int32_t forwardStep = 0;
int32_t ret = 0;
+ STimeWindow preWin = win;
while (1) {
// null data, failed to allocate more memory buffer
@@ -1609,12 +1645,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
- doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ preWin = win;
int32_t prevEndPos = (forwardStep - 1) * step + startPos;
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
if (startPos < 0) {
- if (win.skey <= pQueryAttr->window.ekey) {
+ if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
@@ -1622,12 +1659,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
}
startPos = pSDataBlock->info.rows - 1;
-
+
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
- doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
}
-
+
break;
}
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
@@ -1921,7 +1958,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde
// set the output buffer for the selectivity + tag query
static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) {
- if (!isSelectivityWithTagsQuery(pCtx, numOfOutput)) {
+ if (!isSelectivityWithTagsQuery(pCtx, numOfOutput) && !isScalarWithTagsQuery(pCtx, numOfOutput)) {
return TSDB_CODE_SUCCESS;
}
@@ -1940,7 +1977,7 @@ static int32_t setCtxTagColumnInfo(SQLFunctionCtx *pCtx, int32_t numOfOutput) {
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
tagLen += pCtx[i].outputBytes;
pTagCtx[num++] = &pCtx[i];
- } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
+ } else if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0 || (aAggs[functionId].status & TSDB_FUNCSTATE_SCALAR) != 0) {
p = &pCtx[i];
} else if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) {
// tag function may be the group by tag column
@@ -2091,9 +2128,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pQueryAttr = pQueryAttr;
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
+ pRuntimeEnv->pResultRowArrayList = taosArrayInit(numOfTables, sizeof(SResultRowCell));
pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen);
@@ -2213,6 +2251,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
}
break;
}
+
case OP_StateWindow: {
pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
@@ -2229,52 +2268,49 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
case OP_Filter: { // todo refactor
int32_t numOfFilterCols = 0;
-// if (pQueryAttr->numOfFilterCols > 0) {
-// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
-// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols);
-// } else {
- if (pQueryAttr->stableQuery) {
- SColumnInfo* pColInfo =
- extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols);
- pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
- pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols);
- freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3);
- } else {
- SColumnInfo* pColInfo =
- extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols);
- pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
- pQueryAttr->numOfOutput, pColInfo, numOfFilterCols);
- freeColumnInfo(pColInfo, pQueryAttr->numOfOutput);
- }
-// }
+ if (pQueryAttr->stableQuery) {
+ SColumnInfo* pColInfo =
+ extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols);
+ pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
+ pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols);
+ freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3);
+ } else {
+ SColumnInfo* pColInfo =
+ extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols);
+ pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1,
+ pQueryAttr->numOfOutput, pColInfo, numOfFilterCols);
+ freeColumnInfo(pColInfo, pQueryAttr->numOfOutput);
+ }
+
break;
}
case OP_Fill: {
SOperatorInfo* pInfo = pRuntimeEnv->proot;
- pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput);
+ pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput, pQueryAttr->multigroupResult);
break;
}
case OP_MultiwayMergeSort: {
- bool groupMix = true;
- if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) {
- groupMix = false;
- }
- pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput,
- 4096, merger, groupMix); // TODO hack it
+ pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, 4096, merger);
break;
}
- case OP_GlobalAggregate: {
+    case OP_GlobalAggregate: { // if a fill operator exists, result rows of different groups cannot share one SSDataBlock
+ bool multigroupResult = pQueryAttr->multigroupResult;
+ if (pQueryAttr->multigroupResult) {
+ multigroupResult = (pQueryAttr->fillType == TSDB_FILL_NONE);
+ }
+
pRuntimeEnv->proot = createGlobalAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
- pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo);
+ pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo, multigroupResult);
break;
}
case OP_SLimit: {
- pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
- pQueryAttr->numOfExpr3, merger);
+ int32_t num = pRuntimeEnv->proot->numOfOutput;
+ SExprInfo* pExpr = pRuntimeEnv->proot->pExpr;
+ pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pExpr, num, merger, pQueryAttr->multigroupResult);
break;
}
@@ -2283,6 +2319,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
break;
}
+ case OP_Order: {
+ pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order);
+ break;
+ }
+
default: {
assert(0);
}
@@ -2365,6 +2406,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool);
taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult);
+ taosArrayDestroy(pRuntimeEnv->pResultRowArrayList);
pRuntimeEnv->prevResult = NULL;
}
@@ -2379,11 +2421,11 @@ bool isQueryKilled(SQInfo *pQInfo) {
// query has been executed more than tsShellActivityTimer, and the retrieve has not arrived
// abort current query execution.
- if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) &&
+ if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) &&
(!needBuildResAfterQueryComplete(pQInfo))) {
assert(pQInfo->startExecTs != 0);
- qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64
+ qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d ms, abort current query execution, start:%" PRId64
", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec());
return true;
}
@@ -2927,6 +2969,10 @@ void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInf
}
if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) {
+ if (i < (numOfRows - 1)) {
+ all = false;
+ }
+
break;
}
}
@@ -2947,12 +2993,13 @@ void filterRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSingleColumnFilterInf
void filterColRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSDataBlock* pBlock, bool ascQuery) {
int32_t numOfRows = pBlock->info.rows;
- int8_t *p = calloc(numOfRows, sizeof(int8_t));
+ int8_t *p = NULL;
bool all = true;
if (pRuntimeEnv->pTsBuf != NULL) {
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
-
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+ p = calloc(numOfRows, sizeof(int8_t));
+
TSKEY* k = (TSKEY*) pColInfoData->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
int32_t offset = ascQuery? i:(numOfRows - i - 1);
@@ -2967,19 +3014,28 @@ void filterColRowsInDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SSDataBlock* pBlock
p[offset] = true;
}
- if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) {
+ if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) {
+ if (i < (numOfRows - 1)) {
+ all = false;
+ }
+
break;
}
}
-
+
// save the cursor status
pRuntimeEnv->current->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf);
} else {
- all = filterExecute(pRuntimeEnv->pQueryAttr->pFilters, numOfRows, p);
+ all = filterExecute(pRuntimeEnv->pQueryAttr->pFilters, numOfRows, &p, pBlock->pBlockStatis, pRuntimeEnv->pQueryAttr->numOfCols);
}
if (!all) {
- doCompactSDataBlock(pBlock, numOfRows, p);
+ if (p) {
+ doCompactSDataBlock(pBlock, numOfRows, p);
+ } else {
+ pBlock->info.rows = 0;
+ pBlock->pBlockStatis = NULL; // clean the block statistics info
+ }
}
tfree(p);
@@ -3028,19 +3084,26 @@ void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFi
}
}
-
-void doSetFilterColInfo(SFilterInfo * pFilters, SSDataBlock* pBlock) {
- for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) {
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, j);
-
- filterSetColFieldData(pFilters, pColInfo->info.colId, pColInfo->pData);
+FORCE_INLINE int32_t getColumnDataFromId(void *param, int32_t id, void **data) {
+ int32_t numOfCols = ((SColumnDataParam *)param)->numOfCols;
+ SArray* pDataBlock = ((SColumnDataParam *)param)->pDataBlock;
+
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, j);
+ if (id == pColInfo->info.colId) {
+ *data = pColInfo->pData;
+ break;
+ }
}
+
+ return TSDB_CODE_SUCCESS;
}
+
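
/*
 * The callback above inverts the old doSetFilterColInfo flow: the filter layer
 * now asks for a column's data pointer by column id instead of the executor
 * pushing every column in. A minimal standalone sketch of the same lookup
 * pattern follows; DemoColumn/DemoParam are illustrative stand-ins, not the
 * real TDengine struct definitions.
 */
#include <stdio.h>

typedef struct DemoColumn { int colId; void *data; } DemoColumn;
typedef struct DemoParam  { int numOfCols; DemoColumn *cols; } DemoParam;

/* Resolve a column id to its data pointer, mirroring getColumnDataFromId. */
static int demoGetColumnDataFromId(void *param, int id, void **data) {
  DemoParam *p = (DemoParam *)param;
  for (int j = 0; j < p->numOfCols; ++j) {
    if (p->cols[j].colId == id) {
      *data = p->cols[j].data;
      break;
    }
  }
  return 0;  /* success code */
}

int main(void) {
  int    c1[4] = {1, 2, 3, 4};
  double c2[4] = {0.5, 1.5, 2.5, 3.5};
  DemoColumn cols[] = {{.colId = 10, .data = c1}, {.colId = 11, .data = c2}};
  DemoParam  param = {.numOfCols = 2, .cols = cols};

  void *data = NULL;
  demoGetColumnDataFromId(&param, 11, &data);  /* the filter side asks by id */
  printf("first value of col 11: %f\n", *(double *)data);
  return 0;
}
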
int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
*status = BLK_DATA_NO_NEEDED;
- pBlock->pDataBlock = NULL;
+ pBlock->pDataBlock = NULL;
pBlock->pBlockStatis = NULL;
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@@ -3050,6 +3113,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
SQInfo* pQInfo = pRuntimeEnv->qinfo;
SQueryCostInfo* pCost = &pQInfo->summary;
+ pCost->totalBlocks += 1;
+ pCost->totalRows += pBlock->info.rows;
+
if (pRuntimeEnv->pTsBuf != NULL) {
(*status) = BLK_DATA_ALL_NEEDED;
@@ -3081,7 +3147,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
// check if this data block is required to load
if ((*status) != BLK_DATA_ALL_NEEDED) {
bool needFilter = true;
-
+
      // the pCtx[i] result belongs to the previous time window since the outputBuf has not been set yet,
// the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer
if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
@@ -3188,7 +3254,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
}
if (pQueryAttr->pFilters != NULL) {
- doSetFilterColInfo(pQueryAttr->pFilters, pBlock);
+ SColumnDataParam param = {.numOfCols = pBlock->info.numOfCols, .pDataBlock = pBlock->pDataBlock};
+ filterSetColFieldData(pQueryAttr->pFilters, &param, getColumnDataFromId);
}
if (pQueryAttr->pFilters != NULL || pRuntimeEnv->pTsBuf != NULL) {
@@ -3488,12 +3555,11 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl
}
}
- // enough results in data buffer, return
- if (pBlock->info.rows >= threshold) {
- break;
- }
+ // enough results in data buffer, return
+ if (pBlock->info.rows >= threshold) {
+ break;
}
-
+ }
}
static void updateTableQueryInfoForReverseScan(STableQueryInfo *pTableQueryInfo) {
@@ -3567,6 +3633,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
int64_t tid = 0;
+ pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
@@ -3587,7 +3654,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
// set the timestamp output buffer for top/bottom/diff query
int32_t fid = pCtx[i].functionId;
if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) {
- pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
+ if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
}
@@ -3615,14 +3682,46 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
}
}
+
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i);
pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows;
- // re-estabilish output buffer pointer.
+ // set the correct pointer after the memory buffer is reallocated.
int32_t functionId = pBInfo->pCtx[i].functionId;
+
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
- pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
+ if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
+ }
+ }
+}
+
+void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) {
+ bool needCopyTs = false;
+ int32_t tsNum = 0;
+ char *src = NULL;
+ for (int32_t i = 0; i < numOfOutput; i++) {
+ int32_t functionId = pCtx[i].functionId;
+ if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
+ needCopyTs = true;
+ if (i > 0 && pCtx[i-1].functionId == TSDB_FUNC_TS_DUMMY){
+ SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data
+ src = pColRes->pData;
+ }
+ }else if(functionId == TSDB_FUNC_TS_DUMMY) {
+ tsNum++;
+ }
+ }
+
+ if (!needCopyTs) return;
+ if (tsNum < 2) return;
+ if (src == NULL) return;
+
+ for (int32_t i = 0; i < numOfOutput; i++) {
+ int32_t functionId = pCtx[i].functionId;
+ if(functionId == TSDB_FUNC_TS_DUMMY) {
+ SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i);
+ memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows);
}
}
}
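
/*
 * copyTsColoum handles projections where diff/derivative emit their own
 * timestamp output: when more than one _ts_dummy column is present, the
 * timestamp data computed next to the diff-like function is replicated into
 * every _ts_dummy output column. A simplified standalone sketch (the F_* tags
 * and demoCopyTs are illustrative stand-ins, not the real function ids):
 */
#include <stdio.h>
#include <string.h>

enum { F_TS_DUMMY, F_DIFF, F_OTHER };  /* stand-ins for function ids */

static void demoCopyTs(long long **colData, const int *funcId, int numOfCols, int rows) {
  long long *src = NULL;
  int tsNum = 0, needCopy = 0;
  for (int i = 0; i < numOfCols; i++) {
    if (funcId[i] == F_DIFF) {
      needCopy = 1;
      if (i > 0 && funcId[i - 1] == F_TS_DUMMY) src = colData[i - 1];  /* ts column precedes diff */
    } else if (funcId[i] == F_TS_DUMMY) {
      tsNum++;
    }
  }
  if (!needCopy || tsNum < 2 || src == NULL) return;  /* nothing to propagate */
  for (int i = 0; i < numOfCols; i++) {
    if (funcId[i] == F_TS_DUMMY) memcpy(colData[i], src, sizeof(long long) * rows);
  }
}

int main(void) {
  long long ts1[3] = {1, 2, 3}, d[3] = {10, 20, 30}, ts2[3] = {0, 0, 0};
  long long *cols[] = {ts1, d, ts2};
  int funcs[] = {F_TS_DUMMY, F_DIFF, F_TS_DUMMY};
  demoCopyTs(cols, funcs, 3, 3);
  printf("%lld %lld %lld\n", ts2[0], ts2[1], ts2[2]);  /* 1 2 3 */
  return 0;
}
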
@@ -3640,8 +3739,6 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) {
}
}
-
-
void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) {
for (int32_t j = 0; j < size; ++j) {
SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
@@ -3822,7 +3919,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
}
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
- pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
+ if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
if (!pResInfo->initialized) {
@@ -3883,7 +3980,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int32_t functionId = pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
- pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
+ if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
}
/*
@@ -4154,6 +4251,7 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti
// refactor : extract method
SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+
    //add condition (pBlock->info.rows >= 1) just to make the runtime happy
if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
STimeWindow* w = &pBlock->info.window;
@@ -4186,26 +4284,60 @@ static void updateNumOfRowsInResultRows(SQueryRuntimeEnv* pRuntimeEnv, SQLFuncti
}
}
-static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data) {
+static int32_t compressQueryColData(SColumnInfoData *pColRes, int32_t numOfRows, char *data, int8_t compressed) {
+ int32_t colSize = pColRes->info.bytes * numOfRows;
+ return (*(tDataTypes[pColRes->info.type].compFunc))(pColRes->pData, colSize, numOfRows, data,
+ colSize + COMP_OVERFLOW_BYTES, compressed, NULL, 0);
+}
+
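
/*
 * doCopyQueryResultToMsg below now supports a compressed layout: each column
 * is compressed into the payload in turn, then a trailer of per-column
 * compressed sizes (htonl-encoded) is appended so the receiver can slice the
 * payload apart. A standalone sketch of that layout; demoCompress is a
 * pass-through stand-in for the per-type compFunc dispatch:
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int32_t demoCompress(const char *src, int32_t len, char *dst) {
  memcpy(dst, src, len);  /* real code calls the column type's compress function */
  return len;
}

static char *demoPack(char **cols, const int32_t *colLen, int32_t numOfCols, int32_t *outLen) {
  int32_t cap = numOfCols * (int32_t)sizeof(int32_t);
  for (int32_t i = 0; i < numOfCols; ++i) cap += colLen[i];

  char *buf = malloc(cap), *p = buf;
  int32_t *sizes = calloc(numOfCols, sizeof(int32_t));
  for (int32_t i = 0; i < numOfCols; ++i) {
    sizes[i] = demoCompress(cols[i], colLen[i], p);
    p += sizes[i];
    sizes[i] = htonl(sizes[i]);  /* sizes travel in network byte order */
  }
  memcpy(p, sizes, numOfCols * sizeof(int32_t));  /* trailer after all columns */
  p += numOfCols * sizeof(int32_t);
  free(sizes);
  *outLen = (int32_t)(p - buf);
  return buf;
}

int main(void) {
  char a[] = "aaaa", b[] = "bb";
  char *cols[] = {a, b};
  int32_t lens[] = {4, 2}, total = 0;
  char *msg = demoPack(cols, lens, 2, &total);
  printf("payload bytes: %d\n", total);  /* 4 + 2 + 2 * sizeof(int32_t) = 14 */
  free(msg);
  return 0;
}
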
+static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data, int8_t compressed, int32_t *compLen) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
SSDataBlock* pRes = pRuntimeEnv->outputBuf;
+ int32_t *compSizes = NULL;
+ int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput;
+
+ if (compressed) {
+ compSizes = tcalloc(numOfCols, sizeof(int32_t));
+ }
+
if (pQueryAttr->pExpr2 == NULL) {
- for (int32_t col = 0; col < pQueryAttr->numOfOutput; ++col) {
+ for (int32_t col = 0; col < numOfCols; ++col) {
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
- memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows);
- data += pColRes->info.bytes * pRes->info.rows;
+ if (compressed) {
+ compSizes[col] = compressQueryColData(pColRes, pRes->info.rows, data, compressed);
+ data += compSizes[col];
+ *compLen += compSizes[col];
+ compSizes[col] = htonl(compSizes[col]);
+ } else {
+ memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows);
+ data += pColRes->info.bytes * pRes->info.rows;
+ }
}
} else {
- for (int32_t col = 0; col < pQueryAttr->numOfExpr2; ++col) {
+ for (int32_t col = 0; col < numOfCols; ++col) {
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
- memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
- data += pColRes->info.bytes * numOfRows;
+ if (compressed) {
+ compSizes[col] = htonl(compressQueryColData(pColRes, numOfRows, data, compressed));
+ data += compSizes[col];
+ *compLen += compSizes[col];
+ compSizes[col] = htonl(compSizes[col]);
+ } else {
+ memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
+ data += pColRes->info.bytes * numOfRows;
+ }
}
}
+ if (compressed) {
+ memmove(data, (char *)compSizes, numOfCols * sizeof(int32_t));
+ data += numOfCols * sizeof(int32_t);
+
+ tfree(compSizes);
+ }
+
int32_t numOfTables = (int32_t) taosHashGetSize(pRuntimeEnv->pTableRetrieveTsMap);
*(int32_t*)data = htonl(numOfTables);
data += sizeof(int32_t);
@@ -4227,21 +4359,22 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
}
qDebug("QInfo:0x%"PRIx64" set %d subscribe info", pQInfo->qId, total);
+
// Check if query is completed or not for stable query or normal table query respectively.
if (Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) && pRuntimeEnv->proot->status == OP_EXEC_DONE) {
setQueryStatus(pRuntimeEnv, QUERY_OVER);
}
}
-int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutput, int32_t capacity) {
- void** p = calloc(pFillInfo->numOfCols, POINTER_BYTES);
+int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutput, int32_t capacity, void** p) {
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pOutput->pDataBlock, i);
- p[i] = pColInfoData->pData;
+ p[i] = pColInfoData->pData + (pColInfoData->info.bytes * pOutput->info.rows);
}
- pOutput->info.rows = (int32_t)taosFillResultDataBlock(pFillInfo, p, capacity);
- tfree(p);
+ int32_t numOfRows = (int32_t)taosFillResultDataBlock(pFillInfo, p, capacity - pOutput->info.rows);
+ pOutput->info.rows += numOfRows;
+
return pOutput->info.rows;
}
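
/*
 * The reworked signature turns the fill step into an append: write positions
 * start at the block's current row count, only capacity - rows slots are
 * requested from the fill routine, and the scratch pointer array p is owned
 * by the operator instead of being allocated per call. A minimal sketch of
 * appending at a row offset (memset stands in for the generated fill values):
 */
#include <stdio.h>
#include <string.h>

static int demoAppendFill(char **colBase, const int *bytes, int numOfCols,
                          int *rows, int capacity, int produced) {
  int room = capacity - *rows;          /* only the remaining space is usable */
  if (produced > room) produced = room;
  for (int i = 0; i < numOfCols; ++i) {
    char *writePos = colBase[i] + (size_t)bytes[i] * (*rows);  /* append at the tail */
    memset(writePos, 0, (size_t)bytes[i] * produced);
  }
  *rows += produced;
  return *rows;
}

int main(void) {
  int c1[8] = {0}; double c2[8] = {0};
  char *cols[] = {(char *)c1, (char *)c2};
  int bytes[] = {sizeof(int), sizeof(double)};
  int rows = 3;                                 /* block already holds 3 rows */
  demoAppendFill(cols, bytes, 2, &rows, 8, 4);  /* append 4 more */
  printf("rows now: %d\n", rows);               /* 7 */
  return 0;
}
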
@@ -4723,7 +4856,6 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
pQueryAttr->tsdb = tsdb;
-
if (tsdb != NULL) {
int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery);
if (code != TSDB_CODE_SUCCESS) {
@@ -5285,11 +5417,12 @@ static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) {
static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) {
SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param;
taosArrayDestroy(pInfo->orderColumnList);
+ pInfo->pRes = destroyOutputBuf(pInfo->pRes);
tfree(pInfo->prevRow);
}
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream,
- SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo) {
+ SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp) {
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
pInfo->resultRowFactor =
@@ -5297,15 +5430,14 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
- pInfo->pMerge = param;
- pInfo->bufCapacity = 4096;
- pInfo->udfInfo = pUdfInfo;
-
- pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor);
- pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
-
- pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
- pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
+ pInfo->multiGroupResults = groupResultMixedUp;
+ pInfo->pMerge = param;
+ pInfo->bufCapacity = 4096;
+ pInfo->udfInfo = pUdfInfo;
+ pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor);
+ pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
+ pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
+ pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
// TODO refactor
int32_t len = 0;
@@ -5358,17 +5490,15 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
}
SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SExprInfo *pExpr, int32_t numOfOutput,
- int32_t numOfRows, void *merger, bool groupMix) {
+ int32_t numOfRows, void *merger) {
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
- pInfo->pMerge = merger;
- pInfo->groupMix = groupMix;
- pInfo->bufCapacity = numOfRows;
-
+ pInfo->pMerge = merger;
+ pInfo->bufCapacity = numOfRows;
pInfo->orderColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
- pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
+ pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
- {
+ { // todo extract method to create prev compare buffer
int32_t len = 0;
for(int32_t i = 0; i < numOfOutput; ++i) {
len += pExpr[i].base.colBytes;
@@ -5376,8 +5506,8 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
int32_t numOfCols = (pInfo->orderColumnList != NULL)? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len));
- int32_t offset = POINTER_BYTES * numOfCols;
+ int32_t offset = POINTER_BYTES * numOfCols;
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
@@ -5393,12 +5523,120 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
pOperator->status = OP_IN_EXECUTING;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
- pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols;
+ pOperator->numOfOutput = numOfOutput;
+ pOperator->pExpr = pExpr;
pOperator->exec = doMultiwayMergeSort;
pOperator->cleanup = destroyGlobalAggOperatorInfo;
return pOperator;
}
+static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) {
+ assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols);
+
+ int32_t numOfCols = pSrc->info.numOfCols;
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
+ SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i);
+
+ int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes;
+ char* tmp = realloc(pCol2->pData, newSize);
+ if (tmp != NULL) {
+ pCol2->pData = tmp;
+ int32_t offset = pCol2->info.bytes * pDest->info.rows;
+ memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes);
+ } else {
+ return TSDB_CODE_VND_OUT_OF_MEMORY;
+ }
+ }
+
+ pDest->info.rows += pSrc->info.rows;
+
+ return TSDB_CODE_SUCCESS;
+}
+
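
/*
 * doMergeSDatablock grows each destination column with realloc and copies the
 * source column's rows at the tail. A standalone sketch of the per-column
 * step (demoMergeColumn is illustrative; note that, as in the patch, a failed
 * realloc mid-loop leaves earlier columns already grown):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demoMergeColumn(char **dst, int dstRows, const char *src, int srcRows, int bytes) {
  char *tmp = realloc(*dst, (size_t)(dstRows + srcRows) * bytes);
  if (tmp == NULL) return -1;  /* caller maps this to an out-of-memory code */
  *dst = tmp;
  memcpy(*dst + (size_t)bytes * dstRows, src, (size_t)bytes * srcRows);
  return 0;
}

int main(void) {
  char *col = malloc(sizeof(int) * 2);
  ((int *)col)[0] = 1; ((int *)col)[1] = 2;
  int src[3] = {3, 4, 5};
  if (demoMergeColumn(&col, 2, (const char *)src, 3, sizeof(int)) == 0) {
    int *v = (int *)col;
    printf("%d %d %d %d %d\n", v[0], v[1], v[2], v[3], v[4]);  /* 1 2 3 4 5 */
  }
  free(col);
  return 0;
}
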
+static SSDataBlock* doSort(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SOrderOperatorInfo* pInfo = pOperator->info;
+
+ SSDataBlock* pBlock = NULL;
+ while(1) {
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
+ pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
+ publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+ // start to flush data into disk and try do multiway merge sort
+ if (pBlock == NULL) {
+ doSetOperatorCompleted(pOperator);
+ break;
+ }
+
+ int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock);
+ if (code != TSDB_CODE_SUCCESS) {
+ // todo handle error
+ }
+ }
+
+ int32_t numOfCols = pInfo->pDataBlock->info.numOfCols;
+ void** pCols = calloc(numOfCols, POINTER_BYTES);
+ SSchema* pSchema = calloc(numOfCols, sizeof(SSchema));
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i);
+ pCols[i] = p1->pData;
+ pSchema[i].colId = p1->info.colId;
+ pSchema[i].bytes = p1->info.bytes;
+ pSchema[i].type = (uint8_t) p1->info.type;
+ }
+
+ __compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order);
+ taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp);
+
+ tfree(pCols);
+ tfree(pSchema);
+ return (pInfo->pDataBlock->info.rows > 0)? pInfo->pDataBlock:NULL;
+}
+
+SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) {
+ SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo));
+
+ {
+ SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock));
+ pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData));
+ for(int32_t i = 0; i < numOfOutput; ++i) {
+ SColumnInfoData col = {{0}};
+ col.info.colId = pExpr[i].base.colInfo.colId;
+ col.info.bytes = pExpr[i].base.colBytes;
+ col.info.type = pExpr[i].base.colType;
+ taosArrayPush(pDataBlock->pDataBlock, &col);
+
+ if (col.info.colId == pOrderVal->orderColId) {
+ pInfo->colIndex = i;
+ }
+ }
+
+ pDataBlock->info.numOfCols = numOfOutput;
+ pInfo->order = pOrderVal->order;
+ pInfo->pDataBlock = pDataBlock;
+ }
+
+ SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+ pOperator->name = "InMemoryOrder";
+ pOperator->operatorType = OP_Order;
+ pOperator->blockingOptr = true;
+ pOperator->status = OP_IN_EXECUTING;
+ pOperator->info = pInfo;
+ pOperator->exec = doSort;
+ pOperator->cleanup = destroyOrderOperatorInfo;
+ pOperator->pRuntimeEnv = pRuntimeEnv;
+
+ appendUpstream(pOperator, upstream);
+ return pOperator;
+}
+
static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) {
return pTableScanInfo->order;
}
@@ -5443,8 +5681,7 @@ static SSDataBlock* doAggregate(void* param, bool* newgroup) {
doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
}
- pOperator->status = OP_EXEC_DONE;
- setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+ doSetOperatorCompleted(pOperator);
finalizeQueryResult(pOperator, pInfo->pCtx, &pInfo->resultRowInfo, pInfo->rowCellInfoOffset);
pInfo->pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
@@ -5520,7 +5757,7 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->pRes);
if (pInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
}
return pInfo->pRes;
@@ -5561,6 +5798,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) {
+ copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return pRes;
}
@@ -5586,8 +5824,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
if (*newgroup) {
if (pRes->info.rows > 0) {
pProjectInfo->existDataBlock = pBlock;
- clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
- return pInfo->pRes;
+ break;
} else { // init output buffer for a new group data
for (int32_t j = 0; j < pOperator->numOfOutput; ++j) {
aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]);
@@ -5617,7 +5854,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
break;
}
}
-
+ copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}
@@ -5638,8 +5875,7 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
return NULL;
}
@@ -5667,8 +5903,7 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
pBlock->info.rows = (int32_t)(pInfo->limit - pInfo->total);
pInfo->total = pInfo->limit;
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
} else {
pInfo->total += pBlock->info.rows;
}
@@ -5703,8 +5938,7 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
}
}
- setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
return NULL;
}
@@ -5719,9 +5953,8 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
if (pOperator->status == OP_RES_TO_RETURN) {
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
-
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
}
return pIntervalInfo->pRes;
@@ -5762,7 +5995,7 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
}
return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
@@ -5781,7 +6014,7 @@ static SSDataBlock* doAllIntervalAgg(void* param, bool* newgroup) {
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
}
return pIntervalInfo->pRes;
@@ -5838,11 +6071,16 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
if (pOperator->status == OP_RES_TO_RETURN) {
+ int64_t st = taosGetTimestampUs();
+
copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
}
+ SQInfo* pQInfo = pRuntimeEnv->qinfo;
+ pQInfo->summary.firstStageMergeTime += (taosGetTimestampUs() - st);
+
return pIntervalInfo->pRes;
}
@@ -5930,17 +6168,18 @@ static SSDataBlock* doAllSTableIntervalAgg(void* param, bool* newgroup) {
doCloseAllTimeWindow(pRuntimeEnv);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+ int64_t st = taosGetTimestampUs();
copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
+ SQInfo* pQInfo = pRuntimeEnv->qinfo;
+ pQInfo->summary.firstStageMergeTime += (taosGetTimestampUs() - st);
+
return pIntervalInfo->pRes;
}
-
-
-
static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
STableQueryInfo* item = pRuntimeEnv->current;
@@ -6073,6 +6312,7 @@ static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) {
return pBInfo->pRes->info.rows == 0? NULL:pBInfo->pRes;
}
+
static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -6186,6 +6426,7 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
if (!pRuntimeEnv->pQueryAttr->stableQuery) {
sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes);
}
+
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes);
if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
@@ -6195,34 +6436,48 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
return pInfo->binfo.pRes;
}
-static SSDataBlock* doFill(void* param, bool* newgroup) {
- SOperatorInfo* pOperator = (SOperatorInfo*) param;
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
+static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo *pInfo, SQueryRuntimeEnv* pRuntimeEnv, bool* newgroup) {
+ pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
+ int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
+ taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
- SFillOperatorInfo *pInfo = pOperator->info;
- SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv;
+ taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
+ taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
+
+ doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
+ pInfo->existNewGroupBlock = NULL;
+ *newgroup = true;
+}
+static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) {
if (taosFillHasMoreResults(pInfo->pFillInfo)) {
*newgroup = false;
- doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
- return pInfo->pRes;
+ doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity, pInfo->p);
+ if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult)) {
+ return;
+ }
}
// handle the cached new group data block
if (pInfo->existNewGroupBlock) {
- pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
- int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
- taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
+ doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup);
+ }
+}
- taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
- taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
+static SSDataBlock* doFill(void* param, bool* newgroup) {
+ SOperatorInfo* pOperator = (SOperatorInfo*) param;
- doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
- pInfo->existNewGroupBlock = NULL;
- *newgroup = true;
- return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
+ SFillOperatorInfo *pInfo = pOperator->info;
+ pInfo->pRes->info.rows = 0;
+
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv;
+ doHandleRemainBlockFromNewGroup(pInfo, pRuntimeEnv, newgroup);
+ if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) {
+ return pInfo->pRes;
}
while(1) {
@@ -6238,8 +6493,8 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
pInfo->existNewGroupBlock = pBlock;
*newgroup = false;
- // fill the previous group data block
- // before handle a new data block, close the fill operation for previous group data block
+ // Fill the previous group's data block before handling the data block of the new group.
+ // Close the fill operation for the previous group's data block.
taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey);
} else {
if (pBlock == NULL) {
@@ -6251,36 +6506,35 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey);
} else {
pInfo->totalInputRows += pBlock->info.rows;
-
- int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey
- : */pBlock->info.window.ekey;
-
- taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, ekey);
+ taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, pBlock->info.window.ekey);
taosFillSetInputDataBlock(pInfo->pFillInfo, pBlock);
}
}
- doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
- if (pInfo->pRes->info.rows > 0) { // current group has no more result to return
- return pInfo->pRes;
- } else if (pInfo->existNewGroupBlock) { // try next group
- pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
- int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey
- :*/ pInfo->existNewGroupBlock->info.window.ekey;
- taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
+ doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
- taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
- taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
+ // current group has no more result to return
+ if (pInfo->pRes->info.rows > 0) {
+ // 1. If the results in the current group have not reached the output threshold, continue.
+ // 2. If multiple group results in one SSDataBlock are not allowed, return immediately.
+ if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL || (!pInfo->multigroupResult)) {
+ return pInfo->pRes;
+ }
- doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
- pInfo->existNewGroupBlock = NULL;
- *newgroup = true;
+ doHandleRemainBlockFromNewGroup(pInfo, pRuntimeEnv, newgroup);
+ if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) {
+ return pInfo->pRes;
+ }
+ } else if (pInfo->existNewGroupBlock) { // try next group
+ assert(pBlock != NULL);
+ doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup);
- return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL;
+ if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) {
+ return pInfo->pRes;
+ }
} else {
return NULL;
}
- // return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}
}
@@ -6381,6 +6635,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) {
SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param;
pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo);
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
+ tfree(pInfo->p);
}
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) {
@@ -6399,6 +6654,11 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
+static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
+ SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param;
+ pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock);
+}
+
static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) {
SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param;
doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols);
@@ -6407,6 +6667,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) {
static void destroyDistinctOperatorInfo(void* param, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param;
taosHashCleanup(pInfo->pSet);
+ tfree(pInfo->buf);
+ taosArrayDestroy(pInfo->pDistinctDataInfo);
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
}
@@ -6717,10 +6979,10 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
return pOperator;
}
-SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
- int32_t numOfOutput) {
+SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult) {
SFillOperatorInfo* pInfo = calloc(1, sizeof(SFillOperatorInfo));
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+ pInfo->multigroupResult = multigroupResult;
{
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@@ -6735,6 +6997,8 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
taosCreateFillInfo(pQueryAttr->order.order, w.skey, 0, (int32_t)pRuntimeEnv->resultInfo.capacity, numOfOutput,
pQueryAttr->interval.sliding, pQueryAttr->interval.slidingUnit,
(int8_t)pQueryAttr->precision, pQueryAttr->fillType, pColInfo, pRuntimeEnv->qinfo);
+
+ pInfo->p = calloc(pInfo->pFillInfo->numOfCols, POINTER_BYTES);
}
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
@@ -6747,7 +7011,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
pOperator->numOfOutput = numOfOutput;
pOperator->info = pInfo;
pOperator->pRuntimeEnv = pRuntimeEnv;
-
pOperator->exec = doFill;
pOperator->cleanup = destroySFillOperatorInfo;
@@ -6755,7 +7018,7 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
return pOperator;
}
-SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger) {
+SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger, bool multigroupResult) {
SSLimitOperatorInfo* pInfo = calloc(1, sizeof(SSLimitOperatorInfo));
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
@@ -6763,9 +7026,11 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
pInfo->orderColumnList = getResultGroupCheckColumns(pQueryAttr);
pInfo->slimit = pQueryAttr->slimit;
pInfo->limit = pQueryAttr->limit;
-
+ pInfo->capacity = pRuntimeEnv->resultInfo.capacity;
+ pInfo->threshold = (int64_t)(pInfo->capacity * 0.8);
+ pInfo->currentOffset = pQueryAttr->limit.offset;
pInfo->currentGroupOffset = pQueryAttr->slimit.offset;
- pInfo->currentOffset = pQueryAttr->limit.offset;
+ pInfo->multigroupResult= multigroupResult;
// TODO refactor
int32_t len = 0;
@@ -6773,10 +7038,10 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
len += pExpr[i].base.resBytes;
}
- int32_t numOfCols = pInfo->orderColumnList != NULL? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
+ int32_t numOfCols = (pInfo->orderColumnList != NULL)? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len));
- int32_t offset = POINTER_BYTES * numOfCols;
+ int32_t offset = POINTER_BYTES * numOfCols;
for(int32_t i = 0; i < numOfCols; ++i) {
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
@@ -6784,6 +7049,8 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
offset += pExpr[index->colIndex].base.resBytes;
}
+ pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "SLimitOperator";
@@ -6921,6 +7188,10 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_QID(pRuntimeEnv), count);
}
+ if (pOperator->status == OP_EXEC_DONE) {
+ setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+ }
+
pRes->info.rows = count;
return (pRes->info.rows == 0)? NULL:pInfo->pRes;
}
@@ -6949,6 +7220,53 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
return pOperator;
}
+static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) {
+ if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) {
+ // distinct info already inited
+ return true;
+ }
+ for (int i = 0; i < pOperator->numOfOutput; i++) {
+ pInfo->totalBytes += pOperator->pExpr[i].base.colBytes;
+ }
+ for (int i = 0; i < pOperator->numOfOutput; i++) {
+ int numOfBlock = (int)(taosArrayGetSize(pBlock->pDataBlock));
+ assert(i < numOfBlock);
+ for (int j = 0; j < numOfBlock; j++) {
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j);
+ if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) {
+ SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes};
+ taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
+ }
+ }
+ }
+ pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput);
+ pInfo->buf = calloc(1, pInfo->totalBytes);
+ return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false;
+}
+
+static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) {
+ char *p = pInfo->buf;
+ memset(p, 0, pInfo->totalBytes);
+
+ for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) {
+ SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i);
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index);
+ char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId;
+ if (isNull(val, pDistDataInfo->type)) {
+ p += pDistDataInfo->bytes;
+ continue;
+ }
+ if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) {
+ memcpy(p, varDataVal(val), varDataLen(val));
+ p += varDataLen(val);
+ } else {
+ memcpy(p, val, pDistDataInfo->bytes);
+ p += pDistDataInfo->bytes;
+ }
+ memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
+ p += strlen(MULTI_KEY_DELIM);
+ }
+}
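
/*
 * Multi-column DISTINCT builds one flat composite key per row: each selected
 * column's value is appended to a buffer with MULTI_KEY_DELIM between fields,
 * and the key then indexes the dedup hash set. A standalone sketch of the key
 * construction for two fixed-width columns (DEMO_DELIM stands in for the real
 * delimiter):
 */
#include <stdio.h>
#include <string.h>

#define DEMO_DELIM "_"  /* stand-in for MULTI_KEY_DELIM */

static size_t demoBuildKey(char *key, const char *c1, int b1, const char *c2, int b2, int row) {
  char *p = key;
  memcpy(p, c1 + (size_t)b1 * row, b1);       p += b1;
  memcpy(p, DEMO_DELIM, strlen(DEMO_DELIM));  p += strlen(DEMO_DELIM);
  memcpy(p, c2 + (size_t)b2 * row, b2);       p += b2;
  memcpy(p, DEMO_DELIM, strlen(DEMO_DELIM));  p += strlen(DEMO_DELIM);
  return (size_t)(p - key);
}

int main(void) {
  int  col1[2] = {7, 7};
  char col2[2] = {'a', 'b'};
  char key[64] = {0};
  size_t len = demoBuildKey(key, (const char *)col1, sizeof(int), col2, 1, 0);
  printf("key length: %zu\n", len);  /* sizeof(int) + 1 + 1 + 1 = 7 */
  return 0;
}
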
static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
@@ -6956,95 +7274,76 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
return NULL;
}
-
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
-
pRes->info.rows = 0;
SSDataBlock* pBlock = NULL;
+
while(1) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
+ doSetOperatorCompleted(pOperator);
break;
}
- if (pInfo->colIndex == -1) {
- for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
- if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
- pInfo->colIndex = i;
- break;
- }
- }
- }
- if (pInfo->colIndex == -1) {
- setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
+ if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) {
+ doSetOperatorCompleted(pOperator);
+ break;
}
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
-
- int16_t bytes = pColInfoData->info.bytes;
- int16_t type = pColInfoData->info.type;
-
- // ensure the output buffer size
- SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, 0);
+ // ensure result output buf
if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) {
int32_t newSize = pRes->info.rows + pBlock->info.rows;
- char* tmp = realloc(pResultColInfoData->pData, newSize * bytes);
- if (tmp == NULL) {
- return NULL;
- } else {
- pResultColInfoData->pData = tmp;
- pInfo->outputCapacity = newSize;
+ for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) {
+ SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i);
+ SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i);
+ char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes);
+ if (tmp == NULL) {
+ return NULL;
+ } else {
+ pResultColInfoData->pData = tmp;
+ }
}
+ pInfo->outputCapacity = newSize;
}
- for(int32_t i = 0; i < pBlock->info.rows; ++i) {
- char* val = ((char*)pColInfoData->pData) + bytes * i;
- if (isNull(val, type)) {
- continue;
- }
- char* p = val;
- size_t keyLen = 0;
- if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) {
- tstr* var = (tstr*)(val);
- p = var->data;
- keyLen = varDataLen(var);
- } else {
- keyLen = bytes;
- }
+ for (int32_t i = 0; i < pBlock->info.rows; i++) {
+ buildMultiDistinctKey(pInfo, pBlock, i);
+ if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) {
+ int32_t dummy;
+ taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy));
+ for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); j++) {
+ SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); // src
+ SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dest
- int dummy;
- void* res = taosHashGet(pInfo->pSet, p, keyLen);
- if (res == NULL) {
- taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy));
- char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows;
- memcpy(start, val, bytes);
+ char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i;
+ char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows;
+ memcpy(start, val, pDistDataInfo->bytes);
+ }
pRes->info.rows += 1;
- }
+ }
}
if (pRes->info.rows >= pInfo->threshold) {
break;
}
}
-
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
- pInfo->colIndex = -1;
- pInfo->threshold = 10000000; // distinct result threshold
- pInfo->outputCapacity = 4096;
- pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
+ pInfo->totalBytes = 0;
+ pInfo->buf = NULL;
+ pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold
+ pInfo->outputCapacity = 4096;
+ pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo));
+ pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity);
+
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "DistinctOperator";
@@ -7243,19 +7542,20 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pQueryMsg->order = htons(pQueryMsg->order);
pQueryMsg->orderColId = htons(pQueryMsg->orderColId);
pQueryMsg->queryType = htonl(pQueryMsg->queryType);
- pQueryMsg->tagNameRelType = htons(pQueryMsg->tagNameRelType);
pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols);
pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput);
pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols);
+
pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen);
pQueryMsg->colCondLen = htons(pQueryMsg->colCondLen);
+
pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset);
pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder);
+
pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags);
- pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen);
pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput);
pQueryMsg->sqlstrLen = htonl(pQueryMsg->sqlstrLen);
pQueryMsg->prevResultLen = htonl(pQueryMsg->prevResultLen);
@@ -7346,8 +7646,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pMsg += sizeof(SSqlExpr);
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
- pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType);
- pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen);
+ pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
+ pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
pExprMsg->param[j].pz = pMsg;
@@ -7394,8 +7694,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pMsg += sizeof(SSqlExpr);
for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) {
- pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType);
- pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen);
+ pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType);
+ pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen);
if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) {
pExprMsg->param[j].pz = pMsg;
@@ -7499,17 +7799,6 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
pMsg += pQueryMsg->prevResultLen;
}
- if (pQueryMsg->tbnameCondLen > 0) {
- param->tbnameCond = calloc(1, pQueryMsg->tbnameCondLen + 1);
- if (param->tbnameCond == NULL) {
- code = TSDB_CODE_QRY_OUT_OF_MEMORY;
- goto _cleanup;
- }
-
- strncpy(param->tbnameCond, pMsg, pQueryMsg->tbnameCondLen);
- pMsg += pQueryMsg->tbnameCondLen;
- }
-
//skip ts buf
if ((pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen) > 0) {
pMsg = (char *)pQueryMsg + pQueryMsg->tsBuf.tsOffset + pQueryMsg->tsBuf.tsLen;
@@ -7890,7 +8179,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
return TSDB_CODE_SUCCESS;
}
-int32_t createQueryFilter(char *data, uint16_t len, SFilterInfo** pFilters) {
+int32_t createQueryFilter(char *data, uint16_t len, void** pFilters) {
tExprNode* expr = NULL;
TRY(TSDB_MAX_TAG_CONDITIONS) {
@@ -8144,7 +8433,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) {
}
SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs,
- SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, SFilterInfo* pFilters, int32_t vgId,
+ SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, void* pFilters, int32_t vgId,
char* sql, uint64_t qId, SUdfInfo* pUdfInfo) {
int16_t numOfCols = pQueryMsg->numOfCols;
int16_t numOfOutput = pQueryMsg->numOfOutput;
@@ -8155,6 +8444,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
}
pQInfo->qId = qId;
+ pQInfo->startExecTs = 0;
pQInfo->runtimeEnv.pUdfInfo = pUdfInfo;
@@ -8393,7 +8683,6 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SArray* prevResult = NULL;
if (prevResultLen > 0) {
prevResult = interResFromBinary(param->prevResult, prevResultLen);
-
pRuntimeEnv->prevResult = prevResult;
}
@@ -8540,7 +8829,7 @@ void freeQInfo(SQInfo *pQInfo) {
tfree(pQInfo);
}
-int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
+int32_t doDumpQueryResult(SQInfo *pQInfo, char *data, int8_t compressed, int32_t *compLen) {
// the remained number of retrieved rows, not the interpolated result
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
@@ -8583,7 +8872,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
setQueryStatus(pRuntimeEnv, QUERY_OVER);
}
} else {
- doCopyQueryResultToMsg(pQInfo, (int32_t)pRuntimeEnv->outputBuf->info.rows, data);
+ doCopyQueryResultToMsg(pQInfo, (int32_t)pRuntimeEnv->outputBuf->info.rows, data, compressed, compLen);
}
qDebug("QInfo:0x%"PRIx64" current numOfRes rows:%d, total:%" PRId64, pQInfo->qId,
@@ -8669,6 +8958,30 @@ int32_t checkForQueryBuf(size_t numOfTables) {
return TSDB_CODE_QRY_NOT_ENOUGH_BUFFER;
}
+bool checkNeedToCompressQueryCol(SQInfo *pQInfo) {
+ SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
+ SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
+
+ SSDataBlock* pRes = pRuntimeEnv->outputBuf;
+
+ if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) <= 0) {
+ return false;
+ }
+
+ int32_t numOfRows = pQueryAttr->pExpr2 ? GET_NUM_OF_RESULTS(pRuntimeEnv) : pRes->info.rows;
+ int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput;
+
+ for (int32_t col = 0; col < numOfCols; ++col) {
+ SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
+ int32_t colSize = pColRes->info.bytes * numOfRows;
+ if (NEEDTO_COMPRESS_QUERY(colSize)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
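
/*
 * checkNeedToCompressQueryCol enables the compressed result path only when at
 * least one column payload crosses the NEEDTO_COMPRESS_QUERY size cutoff. A
 * sketch of the decision with an assumed threshold constant (the real cutoff
 * lives in the macro, not here):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_COMP_THRESHOLD (16 * 1024)  /* assumed value for illustration */

static bool demoNeedCompress(const int32_t *colBytes, int32_t numOfCols, int32_t numOfRows) {
  if (numOfRows <= 0) return false;
  for (int32_t i = 0; i < numOfCols; ++i) {
    if (colBytes[i] * numOfRows > DEMO_COMP_THRESHOLD) return true;  /* any big column qualifies */
  }
  return false;
}

int main(void) {
  int32_t bytes[] = {8, 200};
  printf("compress: %d\n", demoNeedCompress(bytes, 2, 100));  /* 200*100 > 16384 -> 1 */
  return 0;
}
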
void releaseQueryBuf(size_t numOfTables) {
if (tsQueryBufferSizeBytes < 0) {
return;
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index cc47cc824bcf59f0839bc5a439d4d15b89e030ea..5994099a0d1ad6b1a87aa19edb6151680128f6df 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -12,7 +12,6 @@
* You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "qExtbuffer.h"
#include "os.h"
#include "qAggMain.h"
#include "queryLog.h"
@@ -21,6 +20,8 @@
#include "taosmsg.h"
#include "tulog.h"
#include "qExecutor.h"
+#include "qExtbuffer.h"
+#include "tcompare.h"
#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \
(data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes)
@@ -1102,3 +1103,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
destroyColumnModel(pDesc->pColumnModel);
tfree(pDesc);
}
+
+void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
+ assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
+
+ int32_t bytes = pSchema[index].bytes;
+ int32_t size = bytes + sizeof(int32_t);
+
+ char* buf = calloc(1, size * numOfRows);
+
+ for(int32_t i = 0; i < numOfRows; ++i) {
+ char* dest = buf + size * i;
+ memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
+ *(int32_t*)(dest+bytes) = i;
+ }
+
+ qsort(buf, numOfRows, size, compareFn);
+
+ int32_t prevLength = 0;
+ char* p = NULL;
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ int32_t bytes1 = pSchema[i].bytes;
+
+ if (i == index) {
+ for(int32_t j = 0; j < numOfRows; ++j){
+ char* src = buf + (j * size);
+ char* dest = ((char*)pCols[i]) + (j * bytes1);
+ memcpy(dest, src, bytes1);
+ }
+ } else {
+ // make sure memory buffer is enough
+ if (prevLength < bytes1) {
+ char *tmp = realloc(p, bytes1 * numOfRows);
+ assert(tmp);
+
+ p = tmp;
+ prevLength = bytes1;
+ }
+
+ memcpy(p, pCols[i], bytes1 * numOfRows);
+
+ for(int32_t j = 0; j < numOfRows; ++j){
+ char* dest = ((char*)pCols[i]) + bytes1 * j;
+
+ int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
+ char* src = p + (newPos * bytes1);
+ memcpy(dest, src, bytes1);
+ }
+ }
+ }
+
+ tfree(buf);
+ tfree(p);
+}
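
/*
 * taoscQSort sorts only the key column directly: each element is packed with
 * its original row index, qsort orders the pairs, and every companion column
 * is then reordered through the recorded permutation using a scratch copy.
 * A compact standalone version of the same idea for two int columns:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmpIntAsc(const void *a, const void *b) {
  int x = *(const int *)a, y = *(const int *)b;  /* compare the leading key */
  return (x > y) - (x < y);
}

static void demoSortByKey(int *colA, int *colB, int rows) {
  int *buf = malloc(sizeof(int) * 2 * rows);     /* (key, original index) pairs */
  for (int i = 0; i < rows; ++i) { buf[2 * i] = colA[i]; buf[2 * i + 1] = i; }
  qsort(buf, rows, sizeof(int) * 2, cmpIntAsc);

  int *tmp = malloc(sizeof(int) * rows);         /* scratch copy of the companion column */
  memcpy(tmp, colB, sizeof(int) * rows);
  for (int i = 0; i < rows; ++i) {
    colA[i] = buf[2 * i];                        /* key column comes back sorted */
    colB[i] = tmp[buf[2 * i + 1]];               /* companion follows the permutation */
  }
  free(tmp);
  free(buf);
}

int main(void) {
  int a[] = {3, 1, 2}, b[] = {30, 10, 20};
  demoSortByKey(a, b, 3);
  printf("%d %d %d / %d %d %d\n", a[0], a[1], a[2], b[0], b[1], b[2]);  /* 1 2 3 / 10 20 30 */
  return 0;
}
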
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index 1a86bbae36697224585522b5be836c61394c7cc4..cdcc164152dddbc34d03508a2bdd7379d6e50892 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -430,7 +430,7 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput)
SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
pFillInfo->pData[i] = pColData->pData;
- if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer
+ if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
assert (pTag->col.colId == pCol->col.colId);
memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy??
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 1171bb089641909ad8a18ac91866736d23a2eea8..c7a7ea963d5635c76030d2199ac99a60924d99a7 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -33,7 +33,9 @@ OptrStr gOptrStr[] = {
{TSDB_RELATION_IN, "in"},
{TSDB_RELATION_AND, "and"},
{TSDB_RELATION_OR, "or"},
- {TSDB_RELATION_NOT, "not"}
+ {TSDB_RELATION_NOT, "not"},
+ {TSDB_RELATION_MATCH, "match"},
+ {TSDB_RELATION_NMATCH, "nmatch"},
};
static FORCE_INLINE int32_t filterFieldColDescCompare(const void *desc1, const void *desc2) {
@@ -156,8 +158,8 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) {
__compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal,
compareDoubleVal, compareLenPrefixedStr, compareStrPatternComp, compareFindItemInSet, compareWStrPatternComp,
compareLenPrefixedWStr, compareUint8Val, compareUint16Val, compareUint32Val, compareUint64Val,
- setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8
-};
+ setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexCompMatch, compareStrRegexCompNMatch
+};
int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
int8_t comparFn = 0;
@@ -195,7 +197,11 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
case TSDB_DATA_TYPE_FLOAT: comparFn = 4; break;
case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break;
case TSDB_DATA_TYPE_BINARY: {
- if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = 19;
+ } else if (optr == TSDB_RELATION_NMATCH) {
+ comparFn = 20;
+ } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
comparFn = 7;
} else if (optr == TSDB_RELATION_IN) {
comparFn = 8;
@@ -207,7 +213,11 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
}
case TSDB_DATA_TYPE_NCHAR: {
- if (optr == TSDB_RELATION_LIKE) {
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = 19;
+ } else if (optr == TSDB_RELATION_NMATCH) {
+ comparFn = 20;
+ } else if (optr == TSDB_RELATION_LIKE) {
comparFn = 9;
} else if (optr == TSDB_RELATION_IN) {
comparFn = 8;
@@ -927,7 +937,7 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint16_t unitIdx) {
return TSDB_CODE_SUCCESS;
}
-int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType) {
+int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint32_t tType, bool tolower) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t sType = tbufReadUint32(&br);
SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(tType), true, false);
@@ -1103,6 +1113,10 @@ int32_t filterConvertSetFromBinary(void **q, const char *buf, int32_t len, uint3
}
t = varDataLen(tmp);
pvar = varDataVal(tmp);
+
+ if (tolower) {
+ strntolower_s(pvar, pvar, (int32_t)t);
+ }
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@@ -1147,7 +1161,7 @@ int32_t filterAddGroupUnitFromNode(SFilterInfo *info, tExprNode* tree, SArray *g
if (tree->_node.optr == TSDB_RELATION_IN && (!IS_VAR_DATA_TYPE(type))) {
void *data = NULL;
- filterConvertSetFromBinary((void **)&data, var->pz, var->nLen, type);
+ filterConvertSetFromBinary((void **)&data, var->pz, var->nLen, type, false);
CHK_LRET(data == NULL, TSDB_CODE_QRY_APP_ERROR, "failed to convert in param");
if (taosHashGetSize((SHashObj *)data) <= 0) {
@@ -1474,19 +1488,6 @@ _return:
return code;
}
-#if 0
-int32_t filterInitUnitFunc(SFilterInfo *info) {
- for (uint16_t i = 0; i < info->unitNum; ++i) {
- SFilterUnit* unit = &info->units[i];
-
- info->cunits[i].func = getComparFunc(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr);
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
-
-
void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) {
if (qDebugFlag & DEBUG_DEBUG) {
CHK_LRETV(info == NULL, "%s - FilterInfo: EMPTY", msg);
@@ -1521,7 +1522,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
int32_t type = FILTER_UNIT_DATA_TYPE(unit);
int32_t len = 0;
int32_t tlen = 0;
- char str[256] = {0};
+ char str[512] = {0};
SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
SSchema *sch = left->desc;
@@ -1539,6 +1540,24 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
strcat(str, "NULL");
}
strcat(str, "]");
+
+ if (unit->compare.optr2) {
+ strcat(str, " && ");
+ sprintf(str + strlen(str), "[%d][%s] %s [", sch->colId, sch->name, gOptrStr[unit->compare.optr2].str);
+
+ if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != TSDB_RELATION_IN) {
+ SFilterField *right = FILTER_UNIT_RIGHT2_FIELD(info, unit);
+ char *data = right->data;
+ if (IS_VAR_DATA_TYPE(type)) {
+ tlen = varDataLen(data);
+ data += VARSTR_HEADER_SIZE;
+ }
+ converToStr(str + strlen(str), type, data, tlen > 32 ? 32 : tlen, &tlen);
+ } else {
+ strcat(str, "NULL");
+ }
+ strcat(str, "]");
+ }
qDebug("%s", str); //TODO
}
@@ -1556,37 +1575,63 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
return;
}
- qDebug("%s - RANGE info:", msg);
-
- qDebug("RANGE Num:%u", info->colRangeNum);
- for (uint16_t i = 0; i < info->colRangeNum; ++i) {
- SFilterRangeCtx *ctx = info->colRange[i];
- qDebug("Column ID[%d] RANGE: isnull[%d],notnull[%d],range[%d]", ctx->colId, ctx->isnull, ctx->notnull, ctx->isrange);
- if (ctx->isrange) {
- SFilterRangeNode *r = ctx->rs;
- while (r) {
- char str[256] = {0};
- int32_t tlen = 0;
- if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) {
- strcat(str,"(NULL)");
- } else {
- FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
- converToStr(str + strlen(str), ctx->type, &r->ra.s, tlen > 32 ? 32 : tlen, &tlen);
- FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
- }
- strcat(str, " - ");
- if (FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_NULL)) {
- strcat(str, "(NULL)");
- } else {
- FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
- converToStr(str + strlen(str), ctx->type, &r->ra.e, tlen > 32 ? 32 : tlen, &tlen);
- FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
+ if (options == 1) {
+ qDebug("%s - RANGE info:", msg);
+
+ qDebug("RANGE Num:%u", info->colRangeNum);
+ for (uint16_t i = 0; i < info->colRangeNum; ++i) {
+ SFilterRangeCtx *ctx = info->colRange[i];
+ qDebug("Column ID[%d] RANGE: isnull[%d],notnull[%d],range[%d]", ctx->colId, ctx->isnull, ctx->notnull, ctx->isrange);
+ if (ctx->isrange) {
+ SFilterRangeNode *r = ctx->rs;
+ while (r) {
+ char str[256] = {0};
+ int32_t tlen = 0;
+ if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) {
+ strcat(str,"(NULL)");
+ } else {
+ FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
+ converToStr(str + strlen(str), ctx->type, &r->ra.s, tlen > 32 ? 32 : tlen, &tlen);
+ FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
+ }
+ strcat(str, " - ");
+ if (FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_NULL)) {
+ strcat(str, "(NULL)");
+ } else {
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,"(") : strcat(str,"[");
+ converToStr(str + strlen(str), ctx->type, &r->ra.e, tlen > 32 ? 32 : tlen, &tlen);
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]");
+ }
+ qDebug("range: %s", str);
+
+ r = r->next;
}
- qDebug("range: %s", str);
-
- r = r->next;
}
}
+
+ return;
+ }
+
+ qDebug("%s - Block Filter info:", msg);
+
+ if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL)) {
+ qDebug("Flag:%s", "ALL");
+ return;
+ } else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY)) {
+ qDebug("Flag:%s", "EMPTY");
+ return;
+ } else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ACTIVE)){
+ qDebug("Flag:%s", "ACTIVE");
+ }
+
+ qDebug("GroupNum:%d", info->blkGroupNum);
+ uint16_t *unitIdx = info->blkUnits;
+ for (uint16_t i = 0; i < info->blkGroupNum; ++i) {
+ qDebug("Group[%d] UnitNum: %d:", i, *unitIdx);
+ uint16_t unitNum = *(unitIdx++);
+ for (uint16_t m = 0; m < unitNum; ++m) {
+ qDebug("uidx[%d]", *(unitIdx++));
+ }
}
}
}
@@ -1674,7 +1719,9 @@ void filterFreeInfo(SFilterInfo *info) {
CHK_RETV(info == NULL);
tfree(info->cunits);
-
+ tfree(info->blkUnitRes);
+ tfree(info->blkUnits);
+
for (int32_t i = 0; i < FLD_TYPE_MAX; ++i) {
for (uint16_t f = 0; f < info->fields[i].num; ++f) {
filterFreeField(&info->fields[i].fields[f], i);
@@ -1755,7 +1802,10 @@ int32_t filterInitValFieldData(SFilterInfo *info) {
}
if (unit->compare.optr == TSDB_RELATION_IN) {
- filterConvertSetFromBinary((void **)&fi->data, var->pz, var->nLen, type);
+ SSchema *sch = FILTER_UNIT_COL_DESC(info, unit);
+      bool tolower = (sch->colId == -1);
+
+ filterConvertSetFromBinary((void **)&fi->data, var->pz, var->nLen, type, tolower);
CHK_LRET(fi->data == NULL, TSDB_CODE_QRY_APP_ERROR, "failed to convert in param");
FILTER_SET_FLAG(fi->flag, FLD_DATA_IS_HASH);
@@ -1825,6 +1875,12 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right)
case TSDB_RELATION_LIKE: {
return ret == 0;
}
+ case TSDB_RELATION_MATCH: {
+ return ret == 0;
+ }
+ case TSDB_RELATION_NMATCH: {
+ return ret == 0;
+ }
case TSDB_RELATION_IN: {
return ret == 1;
}
@@ -2482,10 +2538,10 @@ int32_t filterPostProcessRange(SFilterInfo *info) {
int32_t filterGenerateComInfo(SFilterInfo *info) {
- uint16_t n = 0;
-
info->cunits = malloc(info->unitNum * sizeof(*info->cunits));
-
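+  // blkUnitRes: one verdict per unit; blkUnits: per group, a unit count followed by the surviving unit indexes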
+ info->blkUnitRes = malloc(sizeof(*info->blkUnitRes) * info->unitNum);
+ info->blkUnits = malloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum);
+
for (uint16_t i = 0; i < info->unitNum; ++i) {
SFilterUnit *unit = &info->units[i];
@@ -2493,6 +2549,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) {
info->cunits[i].rfunc = filterGetRangeCompFuncFromOptrs(unit->compare.optr, unit->compare.optr2);
info->cunits[i].optr = FILTER_UNIT_OPTR(unit);
info->cunits[i].colData = NULL;
+ info->cunits[i].colId = FILTER_UNIT_COL_ID(info, unit);
if (unit->right.type == FLD_TYPE_VALUE) {
info->cunits[i].valData = FILTER_UNIT_VAL_DATA(info, unit);
@@ -2508,69 +2565,340 @@ int32_t filterGenerateComInfo(SFilterInfo *info) {
info->cunits[i].dataSize = FILTER_UNIT_COL_SIZE(info, unit);
info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit);
}
-
- uint16_t cgroupNum = info->groupNum + 1;
- for (uint16_t i = 0; i < info->groupNum; ++i) {
- cgroupNum += info->groups[i].unitNum;
+ return TSDB_CODE_SUCCESS;
+}
+
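+// refresh the cached per-unit column-data pointers (called after column data is re-bound)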
+int32_t filterUpdateComUnits(SFilterInfo *info) {
+ for (uint16_t i = 0; i < info->unitNum; ++i) {
+ SFilterUnit *unit = &info->units[i];
+
+ info->cunits[i].colData = FILTER_UNIT_COL_DATA(info, unit, 0);
}
- info->cgroups = malloc(cgroupNum * sizeof(*info->cgroups));
+ return TSDB_CODE_SUCCESS;
+}
+
+
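+// Pre-evaluate every filter unit against the block statistics (min/max/null counts):
+// blkUnitRes[k] becomes 1 when the unit holds for all rows of the block, -1 when it can
+// never hold, and stays 0 when the statistics cannot decide.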
+int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t numOfCols, int32_t numOfRows) {
+ int32_t rmUnit = 0;
+
+ memset(info->blkUnitRes, 0, sizeof(*info->blkUnitRes) * info->unitNum);
- for (uint16_t i = 0; i < info->groupNum; ++i) {
- info->cgroups[n++] = info->groups[i].unitNum;
+ for (int32_t k = 0; k < info->unitNum; ++k) {
+ int32_t index = -1;
+ SFilterComUnit *cunit = &info->cunits[k];
- for (uint16_t m = 0; m < info->groups[i].unitNum; ++m) {
- info->cgroups[n++] = info->groups[i].unitIdxs[m];
+ if (FILTER_NO_MERGE_DATA_TYPE(cunit->dataType)) {
+ continue;
}
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ if (pDataStatis[i].colId == cunit->colId) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index == -1) {
+ continue;
+ }
+
+ if (pDataStatis[index].numOfNull <= 0) {
+ if (cunit->optr == TSDB_RELATION_ISNULL) {
+ info->blkUnitRes[k] = -1;
+ rmUnit = 1;
+ continue;
+ }
+
+ if (cunit->optr == TSDB_RELATION_NOTNULL) {
+ info->blkUnitRes[k] = 1;
+ rmUnit = 1;
+ continue;
+ }
+ } else {
+ if (pDataStatis[index].numOfNull == numOfRows) {
+ if (cunit->optr == TSDB_RELATION_ISNULL) {
+ info->blkUnitRes[k] = 1;
+ rmUnit = 1;
+ continue;
+ }
+
+ info->blkUnitRes[k] = -1;
+ rmUnit = 1;
+ continue;
+ }
+ }
+
+ if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL
+ || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE || cunit->optr == TSDB_RELATION_MATCH
+ || cunit->optr == TSDB_RELATION_NOT_EQUAL) {
+ continue;
+ }
+
+ SDataStatis* pDataBlockst = &pDataStatis[index];
+ void *minVal, *maxVal;
+ float minv = 0;
+ float maxv = 0;
+
+ if (cunit->dataType == TSDB_DATA_TYPE_FLOAT) {
+ minv = (float)(*(double *)(&pDataBlockst->min));
+ maxv = (float)(*(double *)(&pDataBlockst->max));
+
+ minVal = &minv;
+ maxVal = &maxv;
+ } else {
+ minVal = &pDataBlockst->min;
+ maxVal = &pDataBlockst->max;
+ }
+
+ bool minRes = false, maxRes = false;
+
+ if (cunit->rfunc >= 0) {
+ minRes = (*gRangeCompare[cunit->rfunc])(minVal, minVal, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+ maxRes = (*gRangeCompare[cunit->rfunc])(maxVal, maxVal, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+
+ if (minRes && maxRes) {
+ info->blkUnitRes[k] = 1;
+ rmUnit = 1;
+ } else if ((!minRes) && (!maxRes)) {
+ minRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_LESS_EQUAL, minVal, cunit->valData);
+ maxRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_GREATER_EQUAL, maxVal, cunit->valData2);
+
+ if (minRes && maxRes) {
+ continue;
+ }
+
+ info->blkUnitRes[k] = -1;
+ rmUnit = 1;
+ }
+ } else {
+ minRes = filterDoCompare(gDataCompare[cunit->func], cunit->optr, minVal, cunit->valData);
+ maxRes = filterDoCompare(gDataCompare[cunit->func], cunit->optr, maxVal, cunit->valData);
+
+ if (minRes && maxRes) {
+ info->blkUnitRes[k] = 1;
+ rmUnit = 1;
+ } else if ((!minRes) && (!maxRes)) {
+ if (cunit->optr == TSDB_RELATION_EQUAL) {
+ minRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_GREATER, minVal, cunit->valData);
+ maxRes = filterDoCompare(gDataCompare[cunit->func], TSDB_RELATION_LESS, maxVal, cunit->valData);
+ if (minRes || maxRes) {
+ info->blkUnitRes[k] = -1;
+ rmUnit = 1;
+ }
+
+ continue;
+ }
+
+ info->blkUnitRes[k] = -1;
+ rmUnit = 1;
+ }
+ }
+
}
- info->cgroups[n] = 0;
+  CHK_LRET(rmUnit == 0, TSDB_CODE_SUCCESS, "no block filter can be applied");
+
+ info->blkGroupNum = info->groupNum;
+
+ uint16_t *unitNum = info->blkUnits;
+ uint16_t *unitIdx = unitNum + 1;
+ int32_t all = 0, empty = 0;
+ for (uint32_t g = 0; g < info->groupNum; ++g) {
+ SFilterGroup *group = &info->groups[g];
+ *unitNum = group->unitNum;
+ all = 0;
+ empty = 0;
+
+ for (uint32_t u = 0; u < group->unitNum; ++u) {
+ uint16_t uidx = group->unitIdxs[u];
+ if (info->blkUnitRes[uidx] == 1) {
+ --(*unitNum);
+ all = 1;
+ continue;
+ } else if (info->blkUnitRes[uidx] == -1) {
+ *unitNum = 0;
+ empty = 1;
+ break;
+ }
+
+ *(unitIdx++) = uidx;
+ }
+
+ if (*unitNum == 0) {
+ --info->blkGroupNum;
+ assert(empty || all);
+
+ if (empty) {
+ FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY);
+ } else {
+ FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL);
+ goto _return;
+ }
+
+ continue;
+ }
+
+ unitNum = unitIdx;
+ ++unitIdx;
+ }
+
+ if (info->blkGroupNum) {
+ FILTER_CLR_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY);
+ FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_ACTIVE);
+ }
+
+_return:
+
+ filterDumpInfoToString(info, "Block Filter", 2);
+
return TSDB_CODE_SUCCESS;
}
-int32_t filterUpdateComUnits(SFilterInfo *info) {
- for (uint16_t i = 0; i < info->unitNum; ++i) {
- SFilterUnit *unit = &info->units[i];
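+// row-level filtering restricted to the unit groups that survived filterRmUnitByRange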
+bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
+ SFilterInfo *info = (SFilterInfo *)pinfo;
+ bool all = true;
+ uint16_t *unitIdx = NULL;
- info->cunits[i].colData = FILTER_UNIT_COL_DATA(info, unit, 0);
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
+
+ for (int32_t i = 0; i < numOfRows; ++i) {
+ //FILTER_UNIT_CLR_F(info);
+
+ unitIdx = info->blkUnits;
+
+ for (uint32_t g = 0; g < info->blkGroupNum; ++g) {
+ uint16_t unitNum = *(unitIdx++);
+ for (uint32_t u = 0; u < unitNum; ++u) {
+ SFilterComUnit *cunit = &info->cunits[*(unitIdx + u)];
+ void *colData = (char *)cunit->colData + cunit->dataSize * i;
+
+ //if (FILTER_UNIT_GET_F(info, uidx)) {
+ // p[i] = FILTER_UNIT_GET_R(info, uidx);
+ //} else {
+ uint8_t optr = cunit->optr;
+
+ if (isNull(colData, cunit->dataType)) {
+ (*p)[i] = optr == TSDB_RELATION_ISNULL ? true : false;
+ } else {
+ if (optr == TSDB_RELATION_NOTNULL) {
+ (*p)[i] = 1;
+ } else if (optr == TSDB_RELATION_ISNULL) {
+ (*p)[i] = 0;
+ } else if (cunit->rfunc >= 0) {
+ (*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+ } else {
+ (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ }
+
+ //FILTER_UNIT_SET_R(info, uidx, p[i]);
+ //FILTER_UNIT_SET_F(info, uidx);
+ }
+
+ if ((*p)[i] == 0) {
+ break;
+ }
+ }
+
+ if ((*p)[i]) {
+ break;
+ }
+
+ unitIdx += unitNum;
+ }
+
+ if ((*p)[i] == 0) {
+ all = false;
+ }
+ }
+
+ return all;
+}
+
+
+
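+// Returns TSDB_CODE_SUCCESS (0) when the block verdict could be derived from the statistics
+// (*all is then valid); returns 1 when the caller must fall back to full row-level filtering.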
+int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols, bool* all) {
+ if (statis && numOfRows >= FILTER_RM_UNIT_MIN_ROWS) {
+ info->blkFlag = 0;
+
+ filterRmUnitByRange(info, statis, numOfCols, numOfRows);
+
+ if (info->blkFlag) {
+ if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL)) {
+ *all = true;
+ goto _return;
+ } else if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY)) {
+ *all = false;
+ goto _return;
+ }
+
+ assert(info->unitNum > 1);
+
+ *all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols);
+
+ goto _return;
+ }
}
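+  // statistics missing or block too small: let the caller run the row-level filter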
+ return 1;
+
+_return:
+ info->blkFlag = 0;
+
return TSDB_CODE_SUCCESS;
}
-static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, int8_t* p) {
+static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
return true;
}
-static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, int8_t* p) {
+static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
return false;
}
-static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, int8_t* p) {
+static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
+
+ if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ return all;
+ }
+
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
- p[i] = isNull(colData, info->cunits[uidx].dataType);
- if (p[i] == 0) {
+ (*p)[i] = ((colData == NULL) || isNull(colData, info->cunits[uidx].dataType));
+ if ((*p)[i] == 0) {
all = false;
}
}
return all;
}
-static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, int8_t* p) {
+static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
+
+ if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ return all;
+ }
+
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
- p[i] = !isNull(colData, info->cunits[uidx].dataType);
- if (p[i] == 0) {
+ (*p)[i] = ((colData != NULL) && !isNull(colData, info->cunits[uidx].dataType));
+ if ((*p)[i] == 0) {
all = false;
}
}
@@ -2578,7 +2906,7 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows
return all;
}
-bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
+bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
uint16_t dataSize = info->cunits[0].dataSize;
@@ -2587,17 +2915,25 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
void *valData = info->cunits[0].valData;
void *valData2 = info->cunits[0].valData2;
__compar_fn_t func = gDataCompare[info->cunits[0].func];
+
+ if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ return all;
+ }
+
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
for (int32_t i = 0; i < numOfRows; ++i) {
- if (isNull(colData, info->cunits[0].dataType)) {
+ if (colData == NULL || isNull(colData, info->cunits[0].dataType)) {
all = false;
colData += dataSize;
continue;
}
- p[i] = (*rfunc)(colData, colData, valData, valData2, func);
+ (*p)[i] = (*rfunc)(colData, colData, valData, valData2, func);
- if (p[i] == 0) {
+ if ((*p)[i] == 0) {
all = false;
}
@@ -2607,21 +2943,30 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t* p) {
return all;
}
-bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t* p) {
+bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
+
+ if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ return all;
+ }
+
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
for (int32_t i = 0; i < numOfRows; ++i) {
uint16_t uidx = info->groups[0].unitIdxs[0];
void *colData = (char *)info->cunits[uidx].colData + info->cunits[uidx].dataSize * i;
- if (isNull(colData, info->cunits[uidx].dataType)) {
+ if (colData == NULL || isNull(colData, info->cunits[uidx].dataType)) {
+ (*p)[i] = 0;
all = false;
continue;
}
- p[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
+ (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
- if (p[i] == 0) {
+ if ((*p)[i] == 0) {
all = false;
}
}
@@ -2630,10 +2975,18 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t* p) {
}
-bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
+bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
+ if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ return all;
+ }
+
+ if (*p == NULL) {
+ *p = calloc(numOfRows, sizeof(int8_t));
+ }
+
for (int32_t i = 0; i < numOfRows; ++i) {
//FILTER_UNIT_CLR_F(info);
@@ -2649,34 +3002,34 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
//} else {
uint8_t optr = cunit->optr;
- if (isNull(colData, cunit->dataType)) {
- p[i] = optr == TSDB_RELATION_ISNULL ? true : false;
+ if (colData == NULL || isNull(colData, cunit->dataType)) {
+ (*p)[i] = optr == TSDB_RELATION_ISNULL ? true : false;
} else {
if (optr == TSDB_RELATION_NOTNULL) {
- p[i] = 1;
+ (*p)[i] = 1;
} else if (optr == TSDB_RELATION_ISNULL) {
- p[i] = 0;
+ (*p)[i] = 0;
} else if (cunit->rfunc >= 0) {
- p[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+ (*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
- p[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
}
//FILTER_UNIT_SET_R(info, uidx, p[i]);
//FILTER_UNIT_SET_F(info, uidx);
}
- if (p[i] == 0) {
+ if ((*p)[i] == 0) {
break;
}
}
- if (p[i]) {
+ if ((*p)[i]) {
break;
}
}
- if (p[i] == 0) {
+ if ((*p)[i] == 0) {
all = false;
}
}
@@ -2684,8 +3037,9 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t* p) {
return all;
}
-FORCE_INLINE bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t* p) {
- return (*info->func)(info, numOfRows, p);
+
+FORCE_INLINE bool filterExecute(SFilterInfo *info, int32_t numOfRows, int8_t** p, SDataStatis *statis, int16_t numOfCols) {
+ return (*info->func)(info, numOfRows, p, statis, numOfCols);
}
int32_t filterSetExecFunc(SFilterInfo *info) {
@@ -2767,7 +3121,7 @@ _return:
return TSDB_CODE_SUCCESS;
}
-int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data) {
+int32_t filterSetColFieldData(SFilterInfo *info, void *param, filer_get_col_from_id fp) {
CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "info NULL");
  CHK_LRET(info->fields[FLD_TYPE_COLUMN].num <= 0, TSDB_CODE_QRY_APP_ERROR, "no column fields");
@@ -2778,11 +3132,8 @@ int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data) {
for (uint16_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) {
SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i];
SSchema* sch = fi->desc;
- if (sch->colId == colId) {
- fi->data = data;
- break;
- }
+ (*fp)(param, sch->colId, &fi->data);
}
filterUpdateComUnits(info);
@@ -2791,7 +3142,7 @@ int32_t filterSetColFieldData(SFilterInfo *info, int16_t colId, void *data) {
}
-int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t options) {
+int32_t filterInitFromTree(tExprNode* tree, void **pinfo, uint32_t options) {
int32_t code = TSDB_CODE_SUCCESS;
SFilterInfo *info = NULL;
@@ -2828,8 +3179,6 @@ int32_t filterInitFromTree(tExprNode* tree, SFilterInfo **pinfo, uint32_t option
taosArrayDestroy(group);
return code;
}
-
- //ERR_JRET(filterInitUnitFunc(info));
}
info->unitRes = malloc(info->unitNum * sizeof(*info->unitRes));
@@ -2880,36 +3229,43 @@ bool filterRangeExecute(SFilterInfo *info, SDataStatis *pDataStatis, int32_t num
// no statistics data, load the true data block
if (index == -1) {
- return true;
+ break;
}
// not support pre-filter operation on binary/nchar data type
if (FILTER_NO_MERGE_DATA_TYPE(ctx->type)) {
- return true;
- }
-
- if ((pDataStatis[index].numOfNull <= 0) && (ctx->isnull && !ctx->notnull && !ctx->isrange)) {
- return false;
+ break;
}
-
- // all data in current column are NULL, no need to check its boundary value
- if (pDataStatis[index].numOfNull == numOfRows) {
- // if isNULL query exists, load the null data column
- if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) {
- return false;
+ if (pDataStatis[index].numOfNull <= 0) {
+ if (ctx->isnull && !ctx->notnull && !ctx->isrange) {
+ ret = false;
+ break;
}
+ } else if (pDataStatis[index].numOfNull > 0) {
+ if (pDataStatis[index].numOfNull == numOfRows) {
+ if ((ctx->notnull || ctx->isrange) && (!ctx->isnull)) {
+ ret = false;
+ break;
+ }
- continue;
+ continue;
+ } else {
+ if (ctx->isnull) {
+ continue;
+ }
+ }
}
SDataStatis* pDataBlockst = &pDataStatis[index];
SFilterRangeNode *r = ctx->rs;
+ float minv = 0;
+ float maxv = 0;
if (ctx->type == TSDB_DATA_TYPE_FLOAT) {
- float minv = (float)(*(double *)(&pDataBlockst->min));
- float maxv = (float)(*(double *)(&pDataBlockst->max));
+ minv = (float)(*(double *)(&pDataBlockst->min));
+ maxv = (float)(*(double *)(&pDataBlockst->max));
minVal = &minv;
maxVal = &maxv;
@@ -3070,6 +3426,52 @@ int32_t filterFreeNcharColumns(SFilterInfo* info) {
return TSDB_CODE_SUCCESS;
}
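+// A filter can be pushed down to the column index only when it references exactly one column
+// (the index column idxId), contains a single unit, and does not use LIKE/IN/MATCH/ISNULL/NOTNULL.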
+int32_t filterIsIndexedColumnQuery(SFilterInfo* info, int32_t idxId, bool *res) {
+ CHK_LRET(info == NULL, TSDB_CODE_QRY_APP_ERROR, "null parameter");
+
+ CHK_JMP(info->fields[FLD_TYPE_COLUMN].num > 1 || info->fields[FLD_TYPE_COLUMN].num <= 0);
+
+ CHK_JMP(info->unitNum > 1 || info->unitNum <= 0);
+
+ CHK_JMP(FILTER_GET_COL_FIELD_ID(FILTER_GET_COL_FIELD(info, 0)) != idxId);
+
+ int32_t optr = FILTER_UNIT_OPTR(info->units);
+
+ CHK_JMP(optr == TSDB_RELATION_LIKE || optr == TSDB_RELATION_IN || optr == TSDB_RELATION_MATCH
+ || optr == TSDB_RELATION_ISNULL || optr == TSDB_RELATION_NOTNULL);
+
+ *res = true;
+
+ return TSDB_CODE_SUCCESS;
+
+_return:
+ *res = false;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
+int32_t filterGetIndexedColumnInfo(SFilterInfo* info, char** val, int32_t *order, int32_t *flag) {
+ SFilterComUnit *cunit = info->cunits;
+ uint8_t optr = cunit->optr;
+
+ *val = cunit->valData;
+ *order = TSDB_ORDER_ASC;
+
+ if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
+ *order = TSDB_ORDER_DESC;
+ }
+
+ if (optr == TSDB_RELATION_NOT_EQUAL) {
+ *order = TSDB_ORDER_ASC|TSDB_ORDER_DESC;
+ }
+
+ if (cunit->valData2 == cunit->valData && optr != TSDB_RELATION_EQUAL) {
+ FILTER_SET_FLAG(*flag, FI_ACTION_NO_NEED);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c
index e3326cc26bc4216d131211473e807a9845d49133..e9022db503f005ae6713e66e47bbde440bb4aaf7 100644
--- a/src/query/src/qPercentile.c
+++ b/src/query/src/qPercentile.c
@@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
}
pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes;
- pBucket->comparFn = getKeyComparFunc(pBucket->type);
+ pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC);
pBucket->hashFunc = getHashFunc(pBucket->type);
if (pBucket->hashFunc == NULL) {
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index b8a5ee7699b34fed82ad67a592a6ca9148cc92cb..abfa20714b333754478e5c48b9265f839b05a4b1 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
int32_t op = 0;
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
- if (onlyQueryTags(pQueryAttr)) {
- op = OP_TagScan;
- taosArrayPush(plan, &op);
- }
+ op = OP_TagScan;
+ taosArrayPush(plan, &op);
+
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
@@ -646,13 +645,25 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
} else {
op = OP_Project;
taosArrayPush(plan, &op);
+
+ if (pQueryAttr->pExpr2 != NULL) {
+ op = OP_Project;
+ taosArrayPush(plan, &op);
+ }
+
if (pQueryAttr->distinct) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
}
+
+    // outer-query ORDER BY support: add a sort operator when ordering on a non-timestamp column
+ int32_t orderColId = pQueryAttr->order.orderColId;
+ if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) {
+ op = OP_Order;
+ taosArrayPush(plan, &op);
+ }
}
-
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
op = OP_Limit;
@@ -693,7 +704,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
// fill operator
- if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
+ if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
op = OP_Fill;
taosArrayPush(plan, &op);
}
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index 011892fe93f3dc941b17ef7b22313c11fca23d74..d156230efbc75c46205637747bb58f86d13763fe 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -766,7 +766,7 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
pSqlNode->pSortOrder = pSortOrder;
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
- pSqlNode->pHaving = pHaving;
+ pSqlNode->pHaving = pHaving;
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index 825b7960defc2547dfd7248bdc15d157e5808b24..4cf05dd2c7703c7879410faa2632e17a16d595fd 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
static void shrinkBuffer(STSList* ptsData) {
  // shrink the tmp buffer if it consumes too much memory compared to the pre-defined size
if (ptsData->allocSize >= ptsData->threshold * 2) {
- ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
- ptsData->allocSize = MEM_BUF_SIZE;
+ char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
+ if(rawBuf) {
+ ptsData->rawBuf = rawBuf;
+ ptsData->allocSize = MEM_BUF_SIZE;
+ }
}
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 4caf351799adbf000265566fb22617067efb725d..bc27e094db3dcb85ffa73810e922d73cd42ab3a0 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -436,13 +436,13 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
}
STableQueryInfo** pList = supporter->pTableQueryInfo;
-
- SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo);
- SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos);
+ SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos];
TSKEY leftTimestamp = pWindowRes1->win.skey;
- SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo);
- SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos);
+ SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos];
TSKEY rightTimestamp = pWindowRes2->win.skey;
if (leftTimestamp == rightTimestamp) {
@@ -456,7 +456,77 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
}
}
-static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
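+// comparators for SResultRowCell: order by groupId first, then by time window start key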
+int32_t tsAscOrder(const void* p1, const void* p2) {
+ SResultRowCell* pc1 = (SResultRowCell*) p1;
+ SResultRowCell* pc2 = (SResultRowCell*) p2;
+
+ if (pc1->groupId == pc2->groupId) {
+ if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
+ return 0;
+ } else {
+ return (pc1->pRow->win.skey < pc2->pRow->win.skey)? -1:1;
+ }
+ } else {
+ return (pc1->groupId < pc2->groupId)? -1:1;
+ }
+}
+
+int32_t tsDescOrder(const void* p1, const void* p2) {
+ SResultRowCell* pc1 = (SResultRowCell*) p1;
+ SResultRowCell* pc2 = (SResultRowCell*) p2;
+
+ if (pc1->groupId == pc2->groupId) {
+ if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
+ return 0;
+ } else {
+ return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1;
+ }
+ } else {
+ return (pc1->groupId < pc2->groupId)? -1:1;
+ }
+}
+
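+// sort all buffered result rows once, in the time order required by the query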
+void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) {
+ __compar_fn_t fn = NULL;
+ if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) {
+ fn = tsAscOrder;
+ } else {
+ fn = tsDescOrder;
+ }
+
+ taosArraySort(pRuntimeEnv->pResultRowArrayList, fn);
+}
+
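+// merge one group's results by scanning the globally ordered row list from the saved position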
+static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, uint64_t groupId, int32_t* rowCellInfoOffset) {
+ if (!pGroupResInfo->ordered) {
+ orderTheResultRows(pRuntimeEnv);
+ pGroupResInfo->ordered = true;
+ }
+
+ if (pGroupResInfo->pRows == NULL) {
+ pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES);
+ }
+
+ size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList);
+ for(; pGroupResInfo->position < len; ++pGroupResInfo->position) {
+ SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position);
+ if (pResultRowCell->groupId != groupId) {
+ break;
+ }
+
+ int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pResultRowCell->pRow, rowCellInfoOffset);
+ if (num <= 0) {
+ continue;
+ }
+
+ taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow);
+ pResultRowCell->pRow->numOfRows = (uint32_t) num;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static UNUSED_FUNC int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
int32_t* rowCellInfoOffset) {
bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr);
@@ -562,12 +632,7 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu
int64_t st = taosGetTimestampUs();
while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) {
- SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup);
-
- int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset);
- if (ret != TSDB_CODE_SUCCESS) {
- return ret;
- }
+ mergeIntoGroupResultImplRv(pRuntimeEnv, pGroupResInfo, pGroupResInfo->currentGroup, offset);
// this group generates at least one result, return results
if (taosArrayGetSize(pGroupResInfo->pRows) > 0) {
@@ -583,7 +648,6 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu
qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv),
pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime);
-// pQInfo->summary.firstStageMergeTime += elapsedTime;
return TSDB_CODE_SUCCESS;
}
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index 403b51426fd1e8f2c5c14c84dd0671064e093934..c6e6eddce7d8f56095d5d78f4d1f84ed1d4f3c97 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -35,7 +35,7 @@ typedef struct SQueryMgmt {
bool closed;
} SQueryMgmt;
-static void queryMgmtKillQueryFn(void* handle) {
+static void queryMgmtKillQueryFn(void* handle, void* param1) {
void** fp = (void**)handle;
qKillQuery(*fp);
}
@@ -53,7 +53,6 @@ static void freeqinfoFn(void *qhandle) {
void freeParam(SQueryParam *param) {
tfree(param->sql);
tfree(param->tagCond);
- tfree(param->tbnameCond);
tfree(param->pTableIdList);
taosArrayDestroy(param->pOperator);
tfree(param->pExprs);
@@ -140,7 +139,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
qDebug("qmsg:%p query stable, uid:%"PRIu64", tid:%d", pQueryMsg, id->uid, id->tid);
code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen,
- pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols);
+ &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols);
if (code != TSDB_CODE_SUCCESS) {
qError("qmsg:%p failed to query stable, reason: %s", pQueryMsg, tstrerror(code));
@@ -215,6 +214,50 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
return code;
}
+#ifdef TEST_IMPL
+// test hook: parse a wait time embedded in the SQL (e.g. " t_500", " t_2s") and sleep for it
+int waitMoment(SQInfo* pQInfo){
+ if(pQInfo->sql) {
+ int ms = 0;
+ char* pcnt = strstr(pQInfo->sql, " count(*)");
+ if(pcnt) return 0;
+
+ char* pos = strstr(pQInfo->sql, " t_");
+ if(pos){
+ pos += 3;
+ ms = atoi(pos);
+ while(*pos >= '0' && *pos <= '9'){
+ pos ++;
+ }
+ char unit_char = *pos;
+ if(unit_char == 'h'){
+ ms *= 3600*1000;
+ } else if(unit_char == 'm'){
+ ms *= 60*1000;
+ } else if(unit_char == 's'){
+ ms *= 1000;
+ }
+ }
+ if(ms == 0) return 0;
+ printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql);
+
+ if(ms < 1000) {
+ taosMsleep(ms);
+ } else {
+ int used_ms = 0;
+ while(used_ms < ms) {
+ taosMsleep(1000);
+ used_ms += 1000;
+ if(isQueryKilled(pQInfo)){
+ printf("test check query is canceled, sleep break.%s\n", pQInfo->sql);
+ break;
+ }
+ }
+ }
+ }
+ return 1;
+}
+#endif
bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
SQInfo *pQInfo = (SQInfo *)qinfo;
@@ -229,7 +272,8 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
}
*qId = pQInfo->qId;
- pQInfo->startExecTs = taosGetTimestampSec();
+ if(pQInfo->startExecTs == 0)
+ pQInfo->startExecTs = taosGetTimestampMs();
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId);
@@ -256,7 +300,13 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
bool newgroup = false;
publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+
+ int64_t st = taosGetTimestampUs();
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
+ pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st);
+#ifdef TEST_IMPL
+ waitMoment(pQInfo);
+#endif
publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC);
pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv);
@@ -321,6 +371,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *contLen, bool* continueExec) {
SQInfo *pQInfo = (SQInfo *)qinfo;
+ int32_t compLen = 0;
if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
@@ -353,12 +404,25 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
}
(*pRsp)->precision = htons(pQueryAttr->precision);
+ (*pRsp)->compressed = (int8_t)((tsCompressColData != -1) && checkNeedToCompressQueryCol(pQInfo));
+
if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) {
- doDumpQueryResult(pQInfo, (*pRsp)->data);
+ doDumpQueryResult(pQInfo, (*pRsp)->data, (*pRsp)->compressed, &compLen);
} else {
setQueryStatus(pRuntimeEnv, QUERY_OVER);
}
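+  // when column compression was applied, shrink the RPC response to the compressed payload size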
+ if ((*pRsp)->compressed && compLen != 0) {
+ int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput;
+ int32_t origSize = pQueryAttr->resultRowSize * s;
+ int32_t compSize = compLen + numOfCols * sizeof(int32_t);
+ *contLen = *contLen - origSize + compSize;
+ *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*pRsp, *contLen);
+ qDebug("QInfo:0x%"PRIx64" compress col data, uncompressed size:%d, compressed size:%d, ratio:%.2f",
+ pQInfo->qId, origSize, compSize, (float)origSize / (float)compSize);
+ }
+ (*pRsp)->compLen = htonl(compLen);
+
pQInfo->rspContext = NULL;
pQInfo->dataReady = QUERY_RESULT_NOT_READY;
@@ -462,7 +526,7 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) {
pQueryMgmt->closed = true;
pthread_mutex_unlock(&pQueryMgmt->lock);
- taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn);
+ taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn, NULL);
}
void qQueryMgmtReOpen(void *pQMgmt) {
@@ -557,3 +621,148 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) {
taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle);
return 0;
}
+
+// kill the query identified by qId and wait up to waitMs * waitCount for it to stop
+int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) {
+ int32_t error = TSDB_CODE_SUCCESS;
+ void** handle = qAcquireQInfo(pMgmt, qId);
+ if(handle == NULL) return terrno;
+
+ SQInfo* pQInfo = (SQInfo*)(*handle);
+ if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
+ return TSDB_CODE_QRY_INVALID_QHANDLE;
+ }
+ qWarn("QId:0x%"PRIx64" be killed(no memory commit).", pQInfo->qId);
+ setQueryKilled(pQInfo);
+
+  // wait for the query to stop
+ int32_t loop = 0;
+ while (pQInfo->owner != 0) {
+ taosMsleep(waitMs);
+ if(loop++ > waitCount){
+ error = TSDB_CODE_FAILED;
+ break;
+ }
+ }
+
+ qReleaseQInfo(pMgmt, (void **)&handle, true);
+ return error;
+}
+
+// snapshot of a candidate long-running query: its id and start time
+typedef struct {
+ int64_t qId;
+ int64_t startExecTs;
+} SLongQuery;
+
+// callbark for sort compare
+static int compareLongQuery(const void* p1, const void* p2) {
+ // sort desc
+ SLongQuery* plq1 = *(SLongQuery**)p1;
+ SLongQuery* plq2 = *(SLongQuery**)p2;
+ if(plq1->startExecTs == plq2->startExecTs) {
+ return 0;
+ } else if(plq1->startExecTs > plq2->startExecTs) {
+ return 1;
+ } else {
+ return -1;
+ }
+}
+
+// callback for taosCacheRefresh
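+// collects queries that still pin the mem/imem snapshot (and may therefore block the commit)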
+static void cbFoundItem(void* handle, void* param1) {
+  SQInfo* qInfo = *(SQInfo**) handle;
+  if(qInfo == NULL) return;
+  SArray* qids = (SArray*) param1;
+  if(qids == NULL) return;
+
+  bool usedMem = true;
+  bool usedIMem = true;
+  SMemTable* mem = qInfo->query.memRef.snapshot.omem;
+  SMemTable* imem = qInfo->query.memRef.snapshot.imem;
+  if(mem == NULL || T_REF_VAL_GET(mem) == 0)
+    usedMem = false;
+  if(imem == NULL || T_REF_VAL_GET(imem) == 0)   // check imem's own ref count, not mem's
+    usedIMem = false;
+
+  if(!usedMem && !usedIMem)
+    return;
+
+  // record the query id and start time for later sorting
+  SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery));
+  if(plq == NULL) return;
+ plq->qId = qInfo->qId;
+ plq->startExecTs = qInfo->startExecTs;
+ taosArrayPush(qids, &plq);
+}
+
+// collect the queries that still pin memtables, sorted by start time (oldest first)
+void* qObtainLongQuery(void* param){
+ SQueryMgmt* qMgmt = (SQueryMgmt*)param;
+ if(qMgmt == NULL || qMgmt->qinfoPool == NULL)
+ return NULL;
+  SArray* qids = taosArrayInit(4, sizeof(SLongQuery*));
+ if(qids == NULL) return NULL;
+ // Get each item
+ taosCacheRefresh(qMgmt->qinfoPool, cbFoundItem, qids);
+
+ size_t cnt = taosArrayGetSize(qids);
+ if(cnt == 0) {
+ taosArrayDestroy(qids);
+ return NULL;
+ }
+ if(cnt > 1)
+ taosArraySort(qids, compareLongQuery);
+
+ return qids;
+}
+
+// try to unblock the tsdb commit by killing queries that have run longer than longQueryMs
+bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) {
+ SQueryMgmt *pQueryMgmt = pMgmt;
+ bool fixed = false;
+
+ // qid top list
+ SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt);
+ if(qids == NULL) return false;
+
+ // kill Query
+ int64_t now = taosGetTimestampMs();
+ size_t cnt = taosArrayGetSize(qids);
+ size_t i;
+ SLongQuery* plq;
+ for(i=0; i < cnt; i++) {
+ plq = (SLongQuery* )taosArrayGetP(qids, i);
+ if(plq->startExecTs > now) continue;
+ if(now - plq->startExecTs >= longQueryMs) {
+      qKillQueryByQId(pMgmt, plq->qId, 500, 10); // wait up to 500ms * 10 = 5s
+ if(tsdbNoProblem(pRepo)) {
+ fixed = true;
+ qWarn("QId:0x%"PRIx64" fixed problem after kill this query.", plq->qId);
+ break;
+ }
+ }
+ }
+
+ // free qids
+ for(i=0; i < cnt; i++) {
+ free(taosArrayGetP(qids, i));
+ }
+ taosArrayDestroy(qids);
+ return fixed;
+}
+
+// resolve the "tsdb has no block to commit" problem by killing progressively younger queries:
+// first those running over 10 minutes, then over 2 minutes, then over 30 seconds
+bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) {
+  qWarn("pRepo=%p start to solve the no-block-to-commit problem.", pRepo);
+ if(qFixedNoBlock(pRepo, pMgmt, 10*60*1000)) {
+ return true;
+ }
+ if(qFixedNoBlock(pRepo, pMgmt, 2*60*1000)){
+ return true;
+ }
+ if(qFixedNoBlock(pRepo, pMgmt, 30*1000)){
+ return true;
+ }
+ qWarn("pRepo=%p solve problem failed.", pRepo);
+ return false;
+}
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 09be4c0cf09df7560b52fc0704a856369946cf38..e89b6232f7e42b764df7660f06dcd207bfe6e4de 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -102,28 +102,29 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 275
+#define YYNOCODE 278
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SStrToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- SSessionWindowVal yy39;
- SCreateDbInfo yy42;
- int yy43;
- tSqlExpr* yy46;
- SCreatedTableInfo yy96;
- SArray* yy131;
- TAOS_FIELD yy163;
- SSqlNode* yy256;
- SCreateTableSql* yy272;
- SLimitVal yy284;
- SCreateAcctInfo yy341;
- int64_t yy459;
- tVariant yy516;
- SIntervalVal yy530;
- SWindowStateVal yy538;
- SRelationInfo* yy544;
+ SRelationInfo* yy8;
+ SWindowStateVal yy40;
+ SSqlNode* yy56;
+ SCreateDbInfo yy90;
+ int yy96;
+ int32_t yy104;
+ SSessionWindowVal yy147;
+ SCreatedTableInfo yy152;
+ SLimitVal yy166;
+ SCreateAcctInfo yy171;
+ TAOS_FIELD yy183;
+ int64_t yy325;
+ SIntervalVal yy400;
+ SArray* yy421;
+ tVariant yy430;
+ SCreateTableSql* yy438;
+ tSqlExpr* yy439;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -139,18 +140,18 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 362
-#define YYNRULE 289
-#define YYNRULE_WITH_ACTION 289
-#define YYNTOKEN 195
-#define YY_MAX_SHIFT 361
-#define YY_MIN_SHIFTREDUCE 567
-#define YY_MAX_SHIFTREDUCE 855
-#define YY_ERROR_ACTION 856
-#define YY_ACCEPT_ACTION 857
-#define YY_NO_ACTION 858
-#define YY_MIN_REDUCE 859
-#define YY_MAX_REDUCE 1147
+#define YYNSTATE 368
+#define YYNRULE 294
+#define YYNRULE_WITH_ACTION 294
+#define YYNTOKEN 197
+#define YY_MAX_SHIFT 367
+#define YY_MIN_SHIFTREDUCE 576
+#define YY_MAX_SHIFTREDUCE 869
+#define YY_ERROR_ACTION 870
+#define YY_ACCEPT_ACTION 871
+#define YY_NO_ACTION 872
+#define YY_MIN_REDUCE 873
+#define YY_MAX_REDUCE 1166
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -217,288 +218,292 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (754)
+#define YY_ACTTAB_COUNT (773)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 207, 618, 246, 618, 618, 245, 360, 229, 160, 619,
- /* 10 */ 1123, 619, 619, 56, 57, 1036, 60, 61, 857, 361,
- /* 20 */ 249, 50, 618, 59, 318, 64, 62, 65, 63, 984,
- /* 30 */ 619, 982, 983, 55, 54, 160, 985, 53, 52, 51,
- /* 40 */ 986, 153, 987, 988, 356, 945, 654, 568, 569, 570,
- /* 50 */ 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- /* 60 */ 581, 151, 207, 230, 907, 207, 56, 57, 1027, 60,
- /* 70 */ 61, 189, 1124, 249, 50, 1124, 59, 318, 64, 62,
- /* 80 */ 65, 63, 1072, 1033, 271, 79, 55, 54, 3, 190,
- /* 90 */ 53, 52, 51, 56, 57, 250, 60, 61, 702, 1027,
- /* 100 */ 249, 50, 29, 59, 318, 64, 62, 65, 63, 91,
- /* 110 */ 278, 277, 37, 55, 54, 232, 94, 53, 52, 51,
- /* 120 */ 235, 120, 114, 125, 1014, 241, 338, 337, 124, 1014,
- /* 130 */ 130, 133, 123, 56, 58, 794, 60, 61, 127, 85,
- /* 140 */ 249, 50, 92, 59, 318, 64, 62, 65, 63, 997,
- /* 150 */ 998, 34, 1001, 55, 54, 207, 80, 53, 52, 51,
- /* 160 */ 57, 1010, 60, 61, 316, 1124, 249, 50, 263, 59,
- /* 170 */ 318, 64, 62, 65, 63, 37, 44, 267, 266, 55,
- /* 180 */ 54, 348, 243, 53, 52, 51, 1014, 160, 43, 314,
- /* 190 */ 355, 354, 313, 312, 311, 353, 310, 309, 308, 352,
- /* 200 */ 307, 351, 350, 976, 964, 965, 966, 967, 968, 969,
- /* 210 */ 970, 971, 972, 973, 974, 975, 977, 978, 60, 61,
- /* 220 */ 231, 160, 249, 50, 1011, 59, 318, 64, 62, 65,
- /* 230 */ 63, 1008, 1027, 24, 258, 55, 54, 1000, 97, 53,
- /* 240 */ 52, 51, 252, 248, 809, 175, 1013, 798, 233, 801,
- /* 250 */ 210, 804, 248, 809, 1143, 917, 798, 216, 801, 292,
- /* 260 */ 804, 90, 189, 135, 134, 215, 258, 55, 54, 323,
- /* 270 */ 85, 53, 52, 51, 1002, 227, 228, 176, 242, 319,
- /* 280 */ 5, 40, 179, 258, 227, 228, 23, 178, 103, 108,
- /* 290 */ 99, 107, 204, 726, 1012, 1073, 723, 290, 724, 908,
- /* 300 */ 725, 64, 62, 65, 63, 303, 189, 44, 257, 55,
- /* 310 */ 54, 37, 37, 53, 52, 51, 800, 253, 803, 251,
- /* 320 */ 316, 326, 325, 66, 254, 255, 198, 196, 194, 270,
- /* 330 */ 37, 77, 66, 193, 139, 138, 137, 136, 223, 742,
- /* 340 */ 799, 43, 802, 355, 354, 37, 37, 37, 353, 53,
- /* 350 */ 52, 51, 352, 37, 351, 350, 239, 240, 810, 805,
- /* 360 */ 1011, 1011, 272, 78, 37, 806, 122, 810, 805, 37,
- /* 370 */ 37, 359, 358, 144, 806, 327, 38, 14, 348, 1011,
- /* 380 */ 82, 93, 70, 259, 739, 256, 320, 333, 332, 83,
- /* 390 */ 328, 329, 330, 73, 1011, 1011, 1011, 999, 334, 150,
- /* 400 */ 148, 147, 1011, 1, 177, 775, 776, 727, 728, 335,
- /* 410 */ 9, 96, 796, 1011, 336, 340, 758, 274, 1011, 1011,
- /* 420 */ 1083, 766, 767, 746, 71, 712, 274, 295, 714, 297,
- /* 430 */ 155, 713, 33, 74, 807, 67, 26, 830, 811, 38,
- /* 440 */ 247, 38, 67, 95, 76, 67, 617, 16, 797, 15,
- /* 450 */ 205, 25, 25, 113, 18, 112, 17, 731, 808, 732,
- /* 460 */ 25, 6, 729, 211, 730, 298, 20, 119, 19, 118,
- /* 470 */ 22, 1120, 21, 132, 131, 1119, 701, 1118, 225, 226,
- /* 480 */ 208, 209, 1135, 212, 206, 213, 214, 813, 218, 219,
- /* 490 */ 220, 217, 203, 1082, 237, 1079, 1078, 238, 339, 47,
- /* 500 */ 1028, 268, 152, 1065, 1064, 1035, 149, 275, 1009, 279,
- /* 510 */ 1046, 1043, 1044, 1048, 154, 159, 286, 171, 172, 273,
- /* 520 */ 234, 1007, 173, 162, 174, 922, 300, 301, 302, 305,
- /* 530 */ 306, 757, 1025, 45, 281, 201, 161, 283, 41, 317,
- /* 540 */ 75, 916, 293, 72, 49, 324, 164, 1142, 291, 110,
- /* 550 */ 1141, 1138, 163, 289, 180, 331, 1134, 116, 1133, 1130,
- /* 560 */ 287, 285, 181, 942, 42, 39, 46, 202, 282, 904,
- /* 570 */ 126, 902, 128, 129, 900, 899, 260, 280, 192, 897,
- /* 580 */ 896, 895, 894, 893, 892, 891, 195, 197, 888, 886,
- /* 590 */ 884, 882, 199, 48, 879, 200, 875, 304, 349, 81,
- /* 600 */ 86, 284, 1066, 121, 341, 342, 343, 344, 345, 346,
- /* 610 */ 347, 357, 855, 262, 261, 854, 224, 244, 299, 264,
- /* 620 */ 265, 853, 836, 221, 222, 835, 269, 294, 104, 921,
- /* 630 */ 920, 274, 105, 10, 276, 87, 84, 898, 734, 140,
- /* 640 */ 30, 156, 141, 184, 890, 183, 943, 182, 185, 187,
- /* 650 */ 186, 142, 188, 2, 889, 759, 143, 881, 980, 165,
- /* 660 */ 880, 166, 167, 944, 168, 169, 170, 4, 990, 768,
- /* 670 */ 157, 158, 762, 88, 236, 764, 89, 288, 31, 11,
- /* 680 */ 32, 12, 13, 27, 296, 28, 96, 101, 98, 35,
- /* 690 */ 100, 632, 36, 667, 102, 665, 664, 663, 661, 660,
- /* 700 */ 659, 656, 622, 315, 106, 7, 321, 812, 322, 8,
- /* 710 */ 814, 109, 111, 68, 69, 38, 704, 703, 115, 700,
- /* 720 */ 117, 648, 646, 638, 644, 640, 642, 636, 634, 670,
- /* 730 */ 669, 668, 666, 662, 658, 657, 191, 620, 585, 859,
- /* 740 */ 858, 858, 858, 858, 858, 858, 858, 858, 858, 858,
- /* 750 */ 858, 858, 145, 146,
+ /* 0 */ 23, 628, 366, 235, 1051, 208, 241, 712, 211, 629,
+ /* 10 */ 1029, 871, 367, 59, 60, 173, 63, 64, 1042, 1142,
+ /* 20 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68,
+ /* 30 */ 66, 157, 629, 286, 238, 58, 57, 344, 343, 56,
+ /* 40 */ 55, 54, 59, 60, 247, 63, 64, 252, 1029, 255,
+ /* 50 */ 53, 52, 51, 209, 62, 324, 67, 65, 68, 66,
+ /* 60 */ 999, 1042, 997, 998, 58, 57, 664, 1000, 56, 55,
+ /* 70 */ 54, 1001, 1048, 1002, 1003, 58, 57, 277, 1015, 56,
+ /* 80 */ 55, 54, 59, 60, 164, 63, 64, 38, 82, 255,
+ /* 90 */ 53, 52, 51, 88, 62, 324, 67, 65, 68, 66,
+ /* 100 */ 284, 283, 249, 752, 58, 57, 1029, 211, 56, 55,
+ /* 110 */ 54, 38, 59, 61, 806, 63, 64, 1042, 1143, 255,
+ /* 120 */ 53, 52, 51, 628, 62, 324, 67, 65, 68, 66,
+ /* 130 */ 45, 629, 237, 239, 58, 57, 1026, 164, 56, 55,
+ /* 140 */ 54, 60, 1023, 63, 64, 771, 772, 255, 53, 52,
+ /* 150 */ 51, 95, 62, 324, 67, 65, 68, 66, 38, 1090,
+ /* 160 */ 1025, 296, 58, 57, 322, 83, 56, 55, 54, 577,
+ /* 170 */ 578, 579, 580, 581, 582, 583, 584, 585, 586, 587,
+ /* 180 */ 588, 589, 590, 155, 322, 236, 63, 64, 756, 248,
+ /* 190 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68,
+ /* 200 */ 66, 251, 629, 245, 354, 58, 57, 1026, 215, 56,
+ /* 210 */ 55, 54, 1089, 44, 320, 361, 360, 319, 318, 317,
+ /* 220 */ 359, 316, 315, 314, 358, 313, 357, 356, 808, 38,
+ /* 230 */ 1, 180, 24, 991, 979, 980, 981, 982, 983, 984,
+ /* 240 */ 985, 986, 987, 988, 989, 990, 992, 993, 256, 214,
+ /* 250 */ 38, 254, 821, 922, 100, 810, 222, 813, 164, 816,
+ /* 260 */ 192, 211, 139, 138, 137, 221, 809, 254, 821, 329,
+ /* 270 */ 88, 810, 1143, 813, 246, 816, 1028, 29, 1026, 67,
+ /* 280 */ 65, 68, 66, 38, 1162, 233, 234, 58, 57, 325,
+ /* 290 */ 1017, 56, 55, 54, 38, 333, 56, 55, 54, 1026,
+ /* 300 */ 269, 233, 234, 258, 5, 41, 182, 45, 211, 273,
+ /* 310 */ 272, 181, 106, 111, 102, 110, 164, 73, 736, 1143,
+ /* 320 */ 932, 733, 812, 734, 815, 735, 263, 192, 334, 276,
+ /* 330 */ 309, 80, 1026, 94, 69, 123, 117, 128, 229, 335,
+ /* 340 */ 362, 960, 127, 1026, 133, 136, 126, 202, 200, 198,
+ /* 350 */ 69, 260, 261, 130, 197, 143, 142, 141, 140, 74,
+ /* 360 */ 44, 97, 361, 360, 788, 923, 38, 359, 38, 822,
+ /* 370 */ 817, 358, 192, 357, 356, 38, 818, 38, 38, 259,
+ /* 380 */ 811, 257, 814, 332, 331, 822, 817, 264, 125, 298,
+ /* 390 */ 264, 93, 818, 326, 1012, 1013, 35, 1016, 178, 14,
+ /* 400 */ 354, 179, 265, 96, 262, 264, 339, 338, 154, 152,
+ /* 410 */ 151, 336, 749, 340, 81, 1026, 1027, 1026, 3, 193,
+ /* 420 */ 341, 787, 342, 346, 1026, 278, 1026, 1026, 365, 364,
+ /* 430 */ 148, 85, 86, 99, 76, 737, 738, 768, 9, 39,
+ /* 440 */ 778, 779, 722, 819, 301, 724, 216, 303, 1014, 723,
+ /* 450 */ 34, 159, 844, 823, 70, 26, 39, 253, 39, 70,
+ /* 460 */ 79, 98, 627, 70, 135, 134, 25, 25, 280, 280,
+ /* 470 */ 16, 116, 15, 115, 77, 18, 25, 17, 741, 6,
+ /* 480 */ 742, 274, 739, 304, 740, 20, 122, 19, 121, 22,
+ /* 490 */ 217, 21, 711, 1100, 1137, 1136, 1135, 825, 231, 156,
+ /* 500 */ 232, 820, 212, 213, 218, 210, 1099, 219, 220, 224,
+ /* 510 */ 225, 226, 223, 207, 1154, 243, 1096, 1095, 244, 345,
+ /* 520 */ 1050, 1061, 1043, 48, 1058, 1059, 1063, 153, 281, 158,
+ /* 530 */ 163, 292, 1024, 175, 1082, 174, 1081, 279, 84, 285,
+ /* 540 */ 1022, 310, 176, 240, 177, 171, 167, 937, 306, 307,
+ /* 550 */ 308, 767, 311, 312, 1040, 165, 166, 46, 287, 289,
+ /* 560 */ 297, 299, 205, 168, 42, 78, 75, 50, 323, 931,
+ /* 570 */ 330, 1161, 113, 1160, 295, 169, 293, 291, 1157, 183,
+ /* 580 */ 337, 1153, 119, 288, 1152, 1149, 184, 957, 43, 40,
+ /* 590 */ 47, 206, 919, 129, 49, 917, 131, 132, 915, 914,
+ /* 600 */ 266, 195, 196, 911, 910, 909, 908, 907, 906, 905,
+ /* 610 */ 199, 201, 902, 900, 898, 896, 203, 893, 204, 889,
+ /* 620 */ 355, 124, 89, 290, 1083, 347, 348, 349, 350, 351,
+ /* 630 */ 352, 353, 363, 869, 230, 250, 305, 267, 268, 868,
+ /* 640 */ 270, 227, 228, 271, 867, 850, 107, 936, 935, 108,
+ /* 650 */ 849, 275, 280, 300, 10, 282, 744, 87, 30, 90,
+ /* 660 */ 913, 912, 904, 186, 958, 190, 185, 187, 144, 191,
+ /* 670 */ 189, 188, 145, 146, 147, 903, 995, 895, 4, 894,
+ /* 680 */ 959, 769, 160, 33, 780, 170, 172, 2, 161, 162,
+ /* 690 */ 774, 91, 242, 776, 92, 1005, 294, 11, 12, 31,
+ /* 700 */ 32, 13, 27, 302, 28, 99, 101, 104, 36, 103,
+ /* 710 */ 642, 37, 105, 677, 675, 674, 673, 671, 670, 669,
+ /* 720 */ 666, 321, 109, 632, 7, 826, 824, 8, 327, 328,
+ /* 730 */ 112, 114, 71, 72, 118, 714, 39, 120, 713, 710,
+ /* 740 */ 658, 656, 648, 654, 650, 652, 646, 644, 680, 679,
+ /* 750 */ 678, 676, 672, 668, 667, 194, 630, 594, 873, 872,
+ /* 760 */ 872, 872, 872, 872, 872, 872, 872, 872, 872, 872,
+ /* 770 */ 872, 149, 150,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 264, 1, 204, 1, 1, 204, 197, 198, 197, 9,
- /* 10 */ 274, 9, 9, 13, 14, 197, 16, 17, 195, 196,
- /* 20 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 221,
- /* 30 */ 9, 223, 224, 33, 34, 197, 228, 37, 38, 39,
- /* 40 */ 232, 197, 234, 235, 219, 220, 5, 45, 46, 47,
- /* 50 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 60 */ 58, 59, 264, 61, 203, 264, 13, 14, 245, 16,
- /* 70 */ 17, 210, 274, 20, 21, 274, 23, 24, 25, 26,
- /* 80 */ 27, 28, 271, 265, 261, 85, 33, 34, 201, 202,
- /* 90 */ 37, 38, 39, 13, 14, 204, 16, 17, 5, 245,
- /* 100 */ 20, 21, 81, 23, 24, 25, 26, 27, 28, 271,
- /* 110 */ 266, 267, 197, 33, 34, 261, 205, 37, 38, 39,
- /* 120 */ 243, 62, 63, 64, 247, 243, 33, 34, 69, 247,
- /* 130 */ 71, 72, 73, 13, 14, 82, 16, 17, 79, 81,
- /* 140 */ 20, 21, 248, 23, 24, 25, 26, 27, 28, 238,
- /* 150 */ 239, 240, 241, 33, 34, 264, 262, 37, 38, 39,
- /* 160 */ 14, 246, 16, 17, 83, 274, 20, 21, 141, 23,
- /* 170 */ 24, 25, 26, 27, 28, 197, 118, 150, 151, 33,
- /* 180 */ 34, 89, 243, 37, 38, 39, 247, 197, 97, 98,
- /* 190 */ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
- /* 200 */ 109, 110, 111, 221, 222, 223, 224, 225, 226, 227,
- /* 210 */ 228, 229, 230, 231, 232, 233, 234, 235, 16, 17,
- /* 220 */ 242, 197, 20, 21, 246, 23, 24, 25, 26, 27,
- /* 230 */ 28, 197, 245, 44, 197, 33, 34, 0, 205, 37,
- /* 240 */ 38, 39, 68, 1, 2, 208, 247, 5, 261, 7,
- /* 250 */ 61, 9, 1, 2, 247, 203, 5, 68, 7, 269,
- /* 260 */ 9, 271, 210, 74, 75, 76, 197, 33, 34, 80,
- /* 270 */ 81, 37, 38, 39, 241, 33, 34, 208, 244, 37,
- /* 280 */ 62, 63, 64, 197, 33, 34, 264, 69, 70, 71,
- /* 290 */ 72, 73, 264, 2, 208, 271, 5, 273, 7, 203,
- /* 300 */ 9, 25, 26, 27, 28, 87, 210, 118, 68, 33,
- /* 310 */ 34, 197, 197, 37, 38, 39, 5, 143, 7, 145,
- /* 320 */ 83, 147, 148, 81, 33, 34, 62, 63, 64, 140,
- /* 330 */ 197, 142, 81, 69, 70, 71, 72, 73, 149, 37,
- /* 340 */ 5, 97, 7, 99, 100, 197, 197, 197, 104, 37,
- /* 350 */ 38, 39, 108, 197, 110, 111, 242, 242, 116, 117,
- /* 360 */ 246, 246, 82, 205, 197, 123, 77, 116, 117, 197,
- /* 370 */ 197, 65, 66, 67, 123, 242, 96, 81, 89, 246,
- /* 380 */ 82, 85, 96, 143, 96, 145, 15, 147, 148, 82,
- /* 390 */ 242, 242, 242, 96, 246, 246, 246, 239, 242, 62,
- /* 400 */ 63, 64, 246, 206, 207, 131, 132, 116, 117, 242,
- /* 410 */ 122, 115, 1, 246, 242, 242, 82, 119, 246, 246,
- /* 420 */ 237, 82, 82, 121, 138, 82, 119, 82, 82, 82,
- /* 430 */ 96, 82, 81, 136, 123, 96, 96, 82, 82, 96,
- /* 440 */ 60, 96, 96, 96, 81, 96, 82, 144, 37, 146,
- /* 450 */ 264, 96, 96, 144, 144, 146, 146, 5, 123, 7,
- /* 460 */ 96, 81, 5, 264, 7, 114, 144, 144, 146, 146,
- /* 470 */ 144, 264, 146, 77, 78, 264, 113, 264, 264, 264,
- /* 480 */ 264, 264, 247, 264, 264, 264, 264, 116, 264, 264,
- /* 490 */ 264, 264, 264, 237, 237, 237, 237, 237, 237, 263,
- /* 500 */ 245, 197, 197, 272, 272, 197, 60, 245, 245, 268,
- /* 510 */ 197, 197, 197, 197, 197, 197, 197, 249, 197, 199,
- /* 520 */ 268, 197, 197, 258, 197, 197, 197, 197, 197, 197,
- /* 530 */ 197, 123, 260, 197, 268, 197, 259, 268, 197, 197,
- /* 540 */ 135, 197, 129, 137, 134, 197, 256, 197, 133, 197,
- /* 550 */ 197, 197, 257, 127, 197, 197, 197, 197, 197, 197,
- /* 560 */ 126, 125, 197, 197, 197, 197, 197, 197, 128, 197,
- /* 570 */ 197, 197, 197, 197, 197, 197, 197, 124, 197, 197,
- /* 580 */ 197, 197, 197, 197, 197, 197, 197, 197, 197, 197,
- /* 590 */ 197, 197, 197, 139, 197, 197, 197, 88, 112, 199,
- /* 600 */ 199, 199, 199, 95, 94, 51, 91, 93, 55, 92,
- /* 610 */ 90, 83, 5, 5, 152, 5, 199, 199, 199, 152,
- /* 620 */ 5, 5, 99, 199, 199, 98, 141, 114, 205, 209,
- /* 630 */ 209, 119, 205, 81, 96, 96, 120, 199, 82, 200,
- /* 640 */ 81, 81, 200, 212, 199, 216, 218, 217, 215, 214,
- /* 650 */ 213, 200, 211, 206, 199, 82, 200, 199, 236, 255,
- /* 660 */ 199, 254, 253, 220, 252, 251, 250, 201, 236, 82,
- /* 670 */ 81, 96, 82, 81, 1, 82, 81, 81, 96, 130,
- /* 680 */ 96, 130, 81, 81, 114, 81, 115, 70, 77, 86,
- /* 690 */ 85, 5, 86, 9, 85, 5, 5, 5, 5, 5,
- /* 700 */ 5, 5, 84, 15, 77, 81, 24, 82, 59, 81,
- /* 710 */ 116, 146, 146, 16, 16, 96, 5, 5, 146, 82,
- /* 720 */ 146, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- /* 730 */ 5, 5, 5, 5, 5, 5, 96, 84, 60, 0,
- /* 740 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 750 */ 275, 275, 21, 21, 275, 275, 275, 275, 275, 275,
- /* 760 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 770 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 780 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 790 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 800 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 810 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 820 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 830 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 840 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 850 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 860 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 870 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 880 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 890 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 900 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 910 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 920 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 930 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275,
- /* 940 */ 275, 275, 275, 275, 275, 275, 275, 275, 275,
+ /* 0 */ 266, 1, 199, 200, 199, 266, 245, 5, 266, 9,
+ /* 10 */ 249, 197, 198, 13, 14, 253, 16, 17, 247, 277,
+ /* 20 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29,
+ /* 30 */ 30, 199, 9, 271, 263, 35, 36, 35, 36, 39,
+ /* 40 */ 40, 41, 13, 14, 245, 16, 17, 206, 249, 20,
+ /* 50 */ 21, 22, 23, 266, 25, 26, 27, 28, 29, 30,
+ /* 60 */ 223, 247, 225, 226, 35, 36, 5, 230, 39, 40,
+ /* 70 */ 41, 234, 267, 236, 237, 35, 36, 263, 0, 39,
+ /* 80 */ 40, 41, 13, 14, 199, 16, 17, 199, 88, 20,
+ /* 90 */ 21, 22, 23, 84, 25, 26, 27, 28, 29, 30,
+ /* 100 */ 268, 269, 245, 39, 35, 36, 249, 266, 39, 40,
+ /* 110 */ 41, 199, 13, 14, 85, 16, 17, 247, 277, 20,
+ /* 120 */ 21, 22, 23, 1, 25, 26, 27, 28, 29, 30,
+ /* 130 */ 121, 9, 244, 263, 35, 36, 248, 199, 39, 40,
+ /* 140 */ 41, 14, 199, 16, 17, 127, 128, 20, 21, 22,
+ /* 150 */ 23, 250, 25, 26, 27, 28, 29, 30, 199, 274,
+ /* 160 */ 248, 276, 35, 36, 86, 264, 39, 40, 41, 47,
+ /* 170 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ /* 180 */ 58, 59, 60, 61, 86, 63, 16, 17, 124, 246,
+ /* 190 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29,
+ /* 200 */ 30, 206, 9, 244, 92, 35, 36, 248, 266, 39,
+ /* 210 */ 40, 41, 274, 100, 101, 102, 103, 104, 105, 106,
+ /* 220 */ 107, 108, 109, 110, 111, 112, 113, 114, 1, 199,
+ /* 230 */ 208, 209, 46, 223, 224, 225, 226, 227, 228, 229,
+ /* 240 */ 230, 231, 232, 233, 234, 235, 236, 237, 206, 63,
+ /* 250 */ 199, 1, 2, 205, 207, 5, 70, 7, 199, 9,
+ /* 260 */ 212, 266, 76, 77, 78, 79, 39, 1, 2, 83,
+ /* 270 */ 84, 5, 277, 7, 244, 9, 249, 84, 248, 27,
+ /* 280 */ 28, 29, 30, 199, 249, 35, 36, 35, 36, 39,
+ /* 290 */ 243, 39, 40, 41, 199, 244, 39, 40, 41, 248,
+ /* 300 */ 144, 35, 36, 70, 64, 65, 66, 121, 266, 153,
+ /* 310 */ 154, 71, 72, 73, 74, 75, 199, 99, 2, 277,
+ /* 320 */ 205, 5, 5, 7, 7, 9, 70, 212, 244, 143,
+ /* 330 */ 90, 145, 248, 274, 84, 64, 65, 66, 152, 244,
+ /* 340 */ 221, 222, 71, 248, 73, 74, 75, 64, 65, 66,
+ /* 350 */ 84, 35, 36, 82, 71, 72, 73, 74, 75, 141,
+ /* 360 */ 100, 207, 102, 103, 78, 205, 199, 107, 199, 119,
+ /* 370 */ 120, 111, 212, 113, 114, 199, 126, 199, 199, 146,
+ /* 380 */ 5, 148, 7, 150, 151, 119, 120, 199, 80, 272,
+ /* 390 */ 199, 274, 126, 15, 240, 241, 242, 243, 210, 84,
+ /* 400 */ 92, 210, 146, 88, 148, 199, 150, 151, 64, 65,
+ /* 410 */ 66, 244, 99, 244, 207, 248, 210, 248, 203, 204,
+ /* 420 */ 244, 135, 244, 244, 248, 85, 248, 248, 67, 68,
+ /* 430 */ 69, 85, 85, 118, 99, 119, 120, 85, 125, 99,
+ /* 440 */ 85, 85, 85, 126, 85, 85, 266, 85, 241, 85,
+ /* 450 */ 84, 99, 85, 85, 99, 99, 99, 62, 99, 99,
+ /* 460 */ 84, 99, 85, 99, 80, 81, 99, 99, 122, 122,
+ /* 470 */ 147, 147, 149, 149, 139, 147, 99, 149, 5, 84,
+ /* 480 */ 7, 199, 5, 117, 7, 147, 147, 149, 149, 147,
+ /* 490 */ 266, 149, 116, 239, 266, 266, 266, 119, 266, 199,
+ /* 500 */ 266, 126, 266, 266, 266, 266, 239, 266, 266, 266,
+ /* 510 */ 266, 266, 266, 266, 249, 239, 239, 239, 239, 239,
+ /* 520 */ 199, 199, 247, 265, 199, 199, 199, 62, 247, 199,
+ /* 530 */ 199, 199, 247, 199, 275, 251, 275, 201, 201, 270,
+ /* 540 */ 199, 91, 199, 270, 199, 255, 259, 199, 199, 199,
+ /* 550 */ 199, 126, 199, 199, 262, 261, 260, 199, 270, 270,
+ /* 560 */ 136, 133, 199, 258, 199, 138, 140, 137, 199, 199,
+ /* 570 */ 199, 199, 199, 199, 131, 257, 130, 129, 199, 199,
+ /* 580 */ 199, 199, 199, 132, 199, 199, 199, 199, 199, 199,
+ /* 590 */ 199, 199, 199, 199, 142, 199, 199, 199, 199, 199,
+ /* 600 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ /* 610 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ /* 620 */ 115, 98, 201, 201, 201, 97, 53, 94, 96, 57,
+ /* 630 */ 95, 93, 86, 5, 201, 201, 201, 155, 5, 5,
+ /* 640 */ 155, 201, 201, 5, 5, 102, 207, 211, 211, 207,
+ /* 650 */ 101, 144, 122, 117, 84, 99, 85, 123, 84, 99,
+ /* 660 */ 201, 201, 201, 218, 220, 216, 219, 214, 202, 213,
+ /* 670 */ 215, 217, 202, 202, 202, 201, 238, 201, 203, 201,
+ /* 680 */ 222, 85, 84, 252, 85, 256, 254, 208, 84, 99,
+ /* 690 */ 85, 84, 1, 85, 84, 238, 84, 134, 134, 99,
+ /* 700 */ 99, 84, 84, 117, 84, 118, 80, 72, 89, 88,
+ /* 710 */ 5, 89, 88, 9, 5, 5, 5, 5, 5, 5,
+ /* 720 */ 5, 15, 80, 87, 84, 119, 85, 84, 26, 61,
+ /* 730 */ 149, 149, 16, 16, 149, 5, 99, 149, 5, 85,
+ /* 740 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ /* 750 */ 5, 5, 5, 5, 5, 99, 87, 62, 0, 278,
+ /* 760 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 770 */ 278, 21, 21, 278, 278, 278, 278, 278, 278, 278,
+ /* 780 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 790 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 800 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 810 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 820 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 830 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 840 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 850 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 860 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 870 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 880 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 890 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 900 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 910 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 920 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 930 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 940 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 950 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 960 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
};
-#define YY_SHIFT_COUNT (361)
+#define YY_SHIFT_COUNT (367)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (739)
+#define YY_SHIFT_MAX (758)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 189, 91, 91, 244, 244, 81, 242, 251, 251, 21,
- /* 10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 20 */ 3, 3, 3, 0, 2, 251, 291, 291, 291, 58,
- /* 30 */ 58, 3, 3, 3, 237, 3, 3, 3, 3, 289,
- /* 40 */ 81, 92, 92, 41, 754, 754, 754, 251, 251, 251,
- /* 50 */ 251, 251, 251, 251, 251, 251, 251, 251, 251, 251,
- /* 60 */ 251, 251, 251, 251, 251, 251, 251, 291, 291, 291,
- /* 70 */ 93, 93, 93, 93, 93, 93, 93, 3, 3, 3,
- /* 80 */ 302, 3, 3, 3, 58, 58, 3, 3, 3, 3,
- /* 90 */ 274, 274, 288, 58, 3, 3, 3, 3, 3, 3,
- /* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 120 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 130 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 140 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- /* 150 */ 3, 3, 446, 446, 446, 408, 408, 408, 408, 446,
- /* 160 */ 446, 405, 406, 413, 410, 415, 426, 434, 436, 440,
- /* 170 */ 453, 454, 446, 446, 446, 509, 509, 486, 81, 81,
- /* 180 */ 446, 446, 508, 510, 554, 515, 514, 553, 517, 520,
- /* 190 */ 486, 41, 446, 528, 528, 446, 528, 446, 528, 446,
- /* 200 */ 446, 754, 754, 53, 80, 80, 120, 80, 146, 202,
- /* 210 */ 218, 276, 276, 276, 276, 59, 264, 234, 234, 234,
- /* 220 */ 234, 174, 240, 27, 296, 312, 312, 311, 335, 306,
- /* 230 */ 337, 280, 298, 307, 334, 339, 340, 286, 297, 343,
- /* 240 */ 345, 346, 347, 349, 351, 355, 356, 411, 380, 371,
- /* 250 */ 364, 303, 309, 310, 452, 457, 322, 323, 363, 326,
- /* 260 */ 396, 607, 462, 608, 610, 467, 615, 616, 523, 527,
- /* 270 */ 485, 512, 513, 552, 516, 556, 559, 538, 539, 573,
- /* 280 */ 560, 587, 589, 590, 575, 592, 593, 595, 673, 596,
- /* 290 */ 582, 549, 584, 551, 601, 513, 602, 570, 604, 571,
- /* 300 */ 611, 603, 605, 617, 686, 606, 609, 684, 690, 691,
- /* 310 */ 692, 693, 694, 695, 696, 618, 688, 627, 624, 625,
- /* 320 */ 594, 628, 682, 649, 697, 565, 566, 619, 619, 619,
- /* 330 */ 619, 698, 572, 574, 619, 619, 619, 711, 712, 637,
- /* 340 */ 619, 716, 717, 718, 719, 720, 721, 722, 723, 724,
- /* 350 */ 725, 726, 727, 728, 729, 730, 640, 653, 731, 732,
- /* 360 */ 678, 739,
+ /* 0 */ 186, 113, 113, 260, 260, 98, 250, 266, 266, 193,
+ /* 10 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 20 */ 23, 23, 23, 0, 122, 266, 316, 316, 316, 9,
+ /* 30 */ 9, 23, 23, 18, 23, 78, 23, 23, 23, 23,
+ /* 40 */ 308, 98, 112, 112, 61, 773, 773, 773, 266, 266,
+ /* 50 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266,
+ /* 60 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266,
+ /* 70 */ 316, 316, 316, 2, 2, 2, 2, 2, 2, 2,
+ /* 80 */ 23, 23, 23, 64, 23, 23, 23, 9, 9, 23,
+ /* 90 */ 23, 23, 23, 286, 286, 313, 9, 23, 23, 23,
+ /* 100 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 110 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 120 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 130 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 140 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ /* 150 */ 23, 23, 23, 23, 23, 23, 465, 465, 465, 425,
+ /* 160 */ 425, 425, 425, 465, 465, 427, 426, 428, 430, 424,
+ /* 170 */ 443, 446, 448, 451, 452, 465, 465, 465, 450, 450,
+ /* 180 */ 505, 98, 98, 465, 465, 523, 528, 573, 533, 532,
+ /* 190 */ 572, 535, 538, 505, 61, 465, 465, 546, 546, 465,
+ /* 200 */ 546, 465, 546, 465, 465, 773, 773, 29, 69, 69,
+ /* 210 */ 99, 69, 127, 170, 240, 252, 252, 252, 252, 252,
+ /* 220 */ 252, 271, 283, 40, 40, 40, 40, 233, 256, 156,
+ /* 230 */ 315, 257, 257, 317, 375, 361, 344, 340, 346, 347,
+ /* 240 */ 352, 355, 356, 218, 335, 357, 359, 360, 362, 364,
+ /* 250 */ 366, 367, 368, 227, 395, 378, 377, 323, 324, 328,
+ /* 260 */ 473, 477, 338, 339, 376, 342, 384, 628, 482, 633,
+ /* 270 */ 634, 485, 638, 639, 543, 549, 507, 530, 536, 570,
+ /* 280 */ 534, 571, 574, 556, 560, 596, 598, 599, 604, 605,
+ /* 290 */ 590, 607, 608, 610, 691, 612, 600, 563, 601, 564,
+ /* 300 */ 617, 536, 618, 586, 620, 587, 626, 619, 621, 635,
+ /* 310 */ 705, 622, 624, 704, 709, 710, 711, 712, 713, 714,
+ /* 320 */ 715, 636, 706, 642, 640, 641, 606, 643, 702, 668,
+ /* 330 */ 716, 581, 582, 637, 637, 637, 637, 717, 585, 588,
+ /* 340 */ 637, 637, 637, 730, 733, 654, 637, 735, 736, 737,
+ /* 350 */ 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ /* 360 */ 748, 749, 656, 669, 750, 751, 695, 758,
};
-#define YY_REDUCE_COUNT (202)
-#define YY_REDUCE_MIN (-264)
-#define YY_REDUCE_MAX (466)
+#define YY_REDUCE_COUNT (206)
+#define YY_REDUCE_MIN (-266)
+#define YY_REDUCE_MAX (479)
static const short yy_reduce_ofst[] = {
- /* 0 */ -177, -18, -18, -192, -192, -89, -202, -199, -109, -156,
- /* 10 */ -22, 24, -10, 114, 115, 133, 148, 149, 150, 156,
- /* 20 */ 167, 172, 173, -182, -191, -264, -123, -118, -61, -146,
- /* 30 */ -13, -189, -162, 34, 33, 37, 69, 86, -85, -139,
- /* 40 */ 158, 52, 96, -175, -106, 197, -113, 22, 28, 186,
- /* 50 */ 199, 207, 211, 213, 214, 215, 216, 217, 219, 220,
- /* 60 */ 221, 222, 224, 225, 226, 227, 228, -1, 7, 235,
- /* 70 */ 183, 256, 257, 258, 259, 260, 261, 304, 305, 308,
- /* 80 */ 236, 313, 314, 315, 255, 262, 316, 317, 318, 319,
- /* 90 */ 231, 232, 268, 263, 321, 324, 325, 327, 328, 329,
- /* 100 */ 330, 331, 332, 333, 336, 338, 341, 342, 344, 348,
- /* 110 */ 350, 352, 353, 354, 357, 358, 359, 360, 361, 362,
- /* 120 */ 365, 366, 367, 368, 369, 370, 372, 373, 374, 375,
- /* 130 */ 376, 377, 378, 379, 381, 382, 383, 384, 385, 386,
- /* 140 */ 387, 388, 389, 390, 391, 392, 393, 394, 395, 397,
- /* 150 */ 398, 399, 320, 400, 401, 241, 252, 266, 269, 402,
- /* 160 */ 403, 272, 277, 265, 295, 290, 404, 407, 409, 412,
- /* 170 */ 414, 416, 417, 418, 419, 420, 421, 422, 423, 427,
- /* 180 */ 424, 425, 428, 430, 429, 431, 433, 437, 435, 441,
- /* 190 */ 432, 443, 438, 439, 442, 445, 451, 455, 456, 458,
- /* 200 */ 461, 447, 466,
+ /* 0 */ -186, 10, 10, -163, -163, 154, -159, -5, 42, -168,
+ /* 10 */ -112, -115, 117, -41, 30, 51, 84, 95, 167, 169,
+ /* 20 */ 176, 178, 179, -195, -197, -258, -239, -201, -143, -229,
+ /* 30 */ -130, -62, 59, -238, -57, 47, 188, 191, 206, -88,
+ /* 40 */ 48, 207, 115, 160, 119, -99, 22, 215, -266, -261,
+ /* 50 */ -213, -58, 180, 224, 228, 229, 230, 232, 234, 236,
+ /* 60 */ 237, 238, 239, 241, 242, 243, 244, 245, 246, 247,
+ /* 70 */ 27, 35, 265, 254, 267, 276, 277, 278, 279, 280,
+ /* 80 */ 282, 300, 321, 258, 322, 325, 326, 275, 281, 327,
+ /* 90 */ 330, 331, 332, 259, 261, 284, 285, 334, 341, 343,
+ /* 100 */ 345, 348, 349, 350, 351, 353, 354, 358, 363, 365,
+ /* 110 */ 369, 370, 371, 372, 373, 374, 379, 380, 381, 382,
+ /* 120 */ 383, 385, 386, 387, 388, 389, 390, 391, 392, 393,
+ /* 130 */ 394, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ /* 140 */ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414,
+ /* 150 */ 415, 416, 417, 418, 419, 420, 336, 337, 421, 269,
+ /* 160 */ 273, 288, 289, 422, 423, 292, 294, 296, 287, 305,
+ /* 170 */ 318, 429, 290, 432, 431, 433, 434, 435, 436, 437,
+ /* 180 */ 438, 439, 442, 440, 441, 444, 447, 445, 453, 454,
+ /* 190 */ 455, 449, 456, 457, 458, 459, 460, 466, 470, 461,
+ /* 200 */ 471, 474, 472, 476, 478, 479, 475,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 856, 979, 918, 989, 905, 915, 1126, 1126, 1126, 856,
- /* 10 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 20 */ 856, 856, 856, 1037, 876, 1126, 856, 856, 856, 856,
- /* 30 */ 856, 856, 856, 856, 915, 856, 856, 856, 856, 925,
- /* 40 */ 915, 925, 925, 856, 1032, 963, 981, 856, 856, 856,
- /* 50 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 60 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 70 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 80 */ 1039, 1045, 1042, 856, 856, 856, 1047, 856, 856, 856,
- /* 90 */ 1069, 1069, 1030, 856, 856, 856, 856, 856, 856, 856,
- /* 100 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 110 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 120 */ 856, 856, 856, 856, 856, 856, 903, 856, 901, 856,
- /* 130 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 140 */ 856, 856, 856, 856, 887, 856, 856, 856, 856, 856,
- /* 150 */ 856, 874, 878, 878, 878, 856, 856, 856, 856, 878,
- /* 160 */ 878, 1076, 1080, 1062, 1074, 1070, 1057, 1055, 1053, 1061,
- /* 170 */ 1052, 1084, 878, 878, 878, 923, 923, 919, 915, 915,
- /* 180 */ 878, 878, 941, 939, 937, 929, 935, 931, 933, 927,
- /* 190 */ 906, 856, 878, 913, 913, 878, 913, 878, 913, 878,
- /* 200 */ 878, 963, 981, 856, 1085, 1075, 856, 1125, 1115, 1114,
- /* 210 */ 856, 1121, 1113, 1112, 1111, 856, 856, 1107, 1110, 1109,
- /* 220 */ 1108, 856, 856, 856, 856, 1117, 1116, 856, 856, 856,
- /* 230 */ 856, 856, 856, 856, 856, 856, 856, 1081, 1077, 856,
- /* 240 */ 856, 856, 856, 856, 856, 856, 856, 856, 1087, 856,
- /* 250 */ 856, 856, 856, 856, 856, 856, 856, 856, 991, 856,
- /* 260 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 270 */ 856, 1029, 856, 856, 856, 856, 856, 1041, 1040, 856,
- /* 280 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 290 */ 1071, 856, 1063, 856, 856, 1003, 856, 856, 856, 856,
- /* 300 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 310 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 320 */ 856, 856, 856, 856, 856, 856, 856, 1144, 1139, 1140,
- /* 330 */ 1137, 856, 856, 856, 1136, 1131, 1132, 856, 856, 856,
- /* 340 */ 1129, 856, 856, 856, 856, 856, 856, 856, 856, 856,
- /* 350 */ 856, 856, 856, 856, 856, 856, 947, 856, 885, 883,
- /* 360 */ 856, 856,
+ /* 0 */ 870, 994, 933, 1004, 920, 930, 1145, 1145, 1145, 870,
+ /* 10 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 20 */ 870, 870, 870, 1052, 890, 1145, 870, 870, 870, 870,
+ /* 30 */ 870, 870, 870, 1067, 870, 930, 870, 870, 870, 870,
+ /* 40 */ 940, 930, 940, 940, 870, 1047, 978, 996, 870, 870,
+ /* 50 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 60 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 70 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 80 */ 870, 870, 870, 1054, 1060, 1057, 870, 870, 870, 1062,
+ /* 90 */ 870, 870, 870, 1086, 1086, 1045, 870, 870, 870, 870,
+ /* 100 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 110 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 120 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 918,
+ /* 130 */ 870, 916, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 140 */ 870, 870, 870, 870, 870, 870, 870, 870, 901, 870,
+ /* 150 */ 870, 870, 870, 870, 870, 888, 892, 892, 892, 870,
+ /* 160 */ 870, 870, 870, 892, 892, 1093, 1097, 1079, 1091, 1087,
+ /* 170 */ 1074, 1072, 1070, 1078, 1101, 892, 892, 892, 938, 938,
+ /* 180 */ 934, 930, 930, 892, 892, 956, 954, 952, 944, 950,
+ /* 190 */ 946, 948, 942, 921, 870, 892, 892, 928, 928, 892,
+ /* 200 */ 928, 892, 928, 892, 892, 978, 996, 870, 1102, 1092,
+ /* 210 */ 870, 1144, 1132, 1131, 870, 1140, 1139, 1138, 1130, 1129,
+ /* 220 */ 1128, 870, 870, 1124, 1127, 1126, 1125, 870, 870, 870,
+ /* 230 */ 870, 1134, 1133, 870, 870, 870, 870, 870, 870, 870,
+ /* 240 */ 870, 870, 870, 1098, 1094, 870, 870, 870, 870, 870,
+ /* 250 */ 870, 870, 870, 870, 1104, 870, 870, 870, 870, 870,
+ /* 260 */ 870, 870, 870, 870, 1006, 870, 870, 870, 870, 870,
+ /* 270 */ 870, 870, 870, 870, 870, 870, 870, 1044, 870, 870,
+ /* 280 */ 870, 870, 870, 1056, 1055, 870, 870, 870, 870, 870,
+ /* 290 */ 870, 870, 870, 870, 870, 870, 1088, 870, 1080, 870,
+ /* 300 */ 870, 1018, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 310 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 320 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 330 */ 870, 870, 870, 1163, 1158, 1159, 1156, 870, 870, 870,
+ /* 340 */ 1155, 1150, 1151, 870, 870, 870, 1148, 870, 870, 870,
+ /* 350 */ 870, 870, 870, 870, 870, 870, 870, 870, 870, 870,
+ /* 360 */ 870, 870, 962, 870, 899, 897, 870, 870,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -540,6 +545,8 @@ static const YYCODETYPE yyFallback[] = {
0, /* NOTNULL => nothing */
0, /* IS => nothing */
1, /* LIKE => ID */
+ 1, /* MATCH => ID */
+ 1, /* NMATCH => ID */
1, /* GLOB => ID */
0, /* BETWEEN => nothing */
0, /* IN => nothing */
@@ -594,6 +601,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* ACCOUNT => nothing */
0, /* USE => nothing */
0, /* DESCRIBE => nothing */
+ 1, /* DESC => ID */
0, /* ALTER => nothing */
0, /* PASS => nothing */
0, /* PRIVILEGE => nothing */
@@ -643,6 +651,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* FROM => nothing */
0, /* VARIABLE => nothing */
0, /* INTERVAL => nothing */
+ 0, /* EVERY => nothing */
0, /* SESSION => nothing */
0, /* STATE_WINDOW => nothing */
0, /* FILL => nothing */
@@ -650,7 +659,6 @@ static const YYCODETYPE yyFallback[] = {
0, /* ORDER => nothing */
0, /* BY => nothing */
1, /* ASC => ID */
- 1, /* DESC => ID */
0, /* GROUP => nothing */
0, /* HAVING => nothing */
0, /* LIMIT => nothing */
@@ -692,7 +700,6 @@ static const YYCODETYPE yyFallback[] = {
1, /* IMMEDIATE => ID */
1, /* INITIALLY => ID */
1, /* INSTEAD => ID */
- 1, /* MATCH => ID */
1, /* KEY => ID */
1, /* OF => ID */
1, /* RAISE => ID */
@@ -822,259 +829,262 @@ static const char *const yyTokenName[] = {
/* 19 */ "NOTNULL",
/* 20 */ "IS",
/* 21 */ "LIKE",
- /* 22 */ "GLOB",
- /* 23 */ "BETWEEN",
- /* 24 */ "IN",
- /* 25 */ "GT",
- /* 26 */ "GE",
- /* 27 */ "LT",
- /* 28 */ "LE",
- /* 29 */ "BITAND",
- /* 30 */ "BITOR",
- /* 31 */ "LSHIFT",
- /* 32 */ "RSHIFT",
- /* 33 */ "PLUS",
- /* 34 */ "MINUS",
- /* 35 */ "DIVIDE",
- /* 36 */ "TIMES",
- /* 37 */ "STAR",
- /* 38 */ "SLASH",
- /* 39 */ "REM",
- /* 40 */ "CONCAT",
- /* 41 */ "UMINUS",
- /* 42 */ "UPLUS",
- /* 43 */ "BITNOT",
- /* 44 */ "SHOW",
- /* 45 */ "DATABASES",
- /* 46 */ "TOPICS",
- /* 47 */ "FUNCTIONS",
- /* 48 */ "MNODES",
- /* 49 */ "DNODES",
- /* 50 */ "ACCOUNTS",
- /* 51 */ "USERS",
- /* 52 */ "MODULES",
- /* 53 */ "QUERIES",
- /* 54 */ "CONNECTIONS",
- /* 55 */ "STREAMS",
- /* 56 */ "VARIABLES",
- /* 57 */ "SCORES",
- /* 58 */ "GRANTS",
- /* 59 */ "VNODES",
- /* 60 */ "DOT",
- /* 61 */ "CREATE",
- /* 62 */ "TABLE",
- /* 63 */ "STABLE",
- /* 64 */ "DATABASE",
- /* 65 */ "TABLES",
- /* 66 */ "STABLES",
- /* 67 */ "VGROUPS",
- /* 68 */ "DROP",
- /* 69 */ "TOPIC",
- /* 70 */ "FUNCTION",
- /* 71 */ "DNODE",
- /* 72 */ "USER",
- /* 73 */ "ACCOUNT",
- /* 74 */ "USE",
- /* 75 */ "DESCRIBE",
- /* 76 */ "ALTER",
- /* 77 */ "PASS",
- /* 78 */ "PRIVILEGE",
- /* 79 */ "LOCAL",
- /* 80 */ "COMPACT",
- /* 81 */ "LP",
- /* 82 */ "RP",
- /* 83 */ "IF",
- /* 84 */ "EXISTS",
- /* 85 */ "AS",
- /* 86 */ "OUTPUTTYPE",
- /* 87 */ "AGGREGATE",
- /* 88 */ "BUFSIZE",
- /* 89 */ "PPS",
- /* 90 */ "TSERIES",
- /* 91 */ "DBS",
- /* 92 */ "STORAGE",
- /* 93 */ "QTIME",
- /* 94 */ "CONNS",
- /* 95 */ "STATE",
- /* 96 */ "COMMA",
- /* 97 */ "KEEP",
- /* 98 */ "CACHE",
- /* 99 */ "REPLICA",
- /* 100 */ "QUORUM",
- /* 101 */ "DAYS",
- /* 102 */ "MINROWS",
- /* 103 */ "MAXROWS",
- /* 104 */ "BLOCKS",
- /* 105 */ "CTIME",
- /* 106 */ "WAL",
- /* 107 */ "FSYNC",
- /* 108 */ "COMP",
- /* 109 */ "PRECISION",
- /* 110 */ "UPDATE",
- /* 111 */ "CACHELAST",
- /* 112 */ "PARTITIONS",
- /* 113 */ "UNSIGNED",
- /* 114 */ "TAGS",
- /* 115 */ "USING",
- /* 116 */ "NULL",
- /* 117 */ "NOW",
- /* 118 */ "SELECT",
- /* 119 */ "UNION",
- /* 120 */ "ALL",
- /* 121 */ "DISTINCT",
- /* 122 */ "FROM",
- /* 123 */ "VARIABLE",
- /* 124 */ "INTERVAL",
- /* 125 */ "SESSION",
- /* 126 */ "STATE_WINDOW",
- /* 127 */ "FILL",
- /* 128 */ "SLIDING",
- /* 129 */ "ORDER",
- /* 130 */ "BY",
- /* 131 */ "ASC",
- /* 132 */ "DESC",
- /* 133 */ "GROUP",
- /* 134 */ "HAVING",
- /* 135 */ "LIMIT",
- /* 136 */ "OFFSET",
- /* 137 */ "SLIMIT",
- /* 138 */ "SOFFSET",
- /* 139 */ "WHERE",
- /* 140 */ "RESET",
- /* 141 */ "QUERY",
- /* 142 */ "SYNCDB",
- /* 143 */ "ADD",
- /* 144 */ "COLUMN",
- /* 145 */ "MODIFY",
- /* 146 */ "TAG",
- /* 147 */ "CHANGE",
- /* 148 */ "SET",
- /* 149 */ "KILL",
- /* 150 */ "CONNECTION",
- /* 151 */ "STREAM",
- /* 152 */ "COLON",
- /* 153 */ "ABORT",
- /* 154 */ "AFTER",
- /* 155 */ "ATTACH",
- /* 156 */ "BEFORE",
- /* 157 */ "BEGIN",
- /* 158 */ "CASCADE",
- /* 159 */ "CLUSTER",
- /* 160 */ "CONFLICT",
- /* 161 */ "COPY",
- /* 162 */ "DEFERRED",
- /* 163 */ "DELIMITERS",
- /* 164 */ "DETACH",
- /* 165 */ "EACH",
- /* 166 */ "END",
- /* 167 */ "EXPLAIN",
- /* 168 */ "FAIL",
- /* 169 */ "FOR",
- /* 170 */ "IGNORE",
- /* 171 */ "IMMEDIATE",
- /* 172 */ "INITIALLY",
- /* 173 */ "INSTEAD",
- /* 174 */ "MATCH",
- /* 175 */ "KEY",
- /* 176 */ "OF",
- /* 177 */ "RAISE",
- /* 178 */ "REPLACE",
- /* 179 */ "RESTRICT",
- /* 180 */ "ROW",
- /* 181 */ "STATEMENT",
- /* 182 */ "TRIGGER",
- /* 183 */ "VIEW",
- /* 184 */ "IPTOKEN",
- /* 185 */ "SEMI",
- /* 186 */ "NONE",
- /* 187 */ "PREV",
- /* 188 */ "LINEAR",
- /* 189 */ "IMPORT",
- /* 190 */ "TBNAME",
- /* 191 */ "JOIN",
- /* 192 */ "INSERT",
- /* 193 */ "INTO",
- /* 194 */ "VALUES",
- /* 195 */ "program",
- /* 196 */ "cmd",
- /* 197 */ "ids",
- /* 198 */ "dbPrefix",
- /* 199 */ "cpxName",
- /* 200 */ "ifexists",
- /* 201 */ "alter_db_optr",
- /* 202 */ "alter_topic_optr",
- /* 203 */ "acct_optr",
- /* 204 */ "exprlist",
- /* 205 */ "ifnotexists",
- /* 206 */ "db_optr",
- /* 207 */ "topic_optr",
- /* 208 */ "typename",
- /* 209 */ "bufsize",
- /* 210 */ "pps",
- /* 211 */ "tseries",
- /* 212 */ "dbs",
- /* 213 */ "streams",
- /* 214 */ "storage",
- /* 215 */ "qtime",
- /* 216 */ "users",
- /* 217 */ "conns",
- /* 218 */ "state",
- /* 219 */ "intitemlist",
- /* 220 */ "intitem",
- /* 221 */ "keep",
- /* 222 */ "cache",
- /* 223 */ "replica",
- /* 224 */ "quorum",
- /* 225 */ "days",
- /* 226 */ "minrows",
- /* 227 */ "maxrows",
- /* 228 */ "blocks",
- /* 229 */ "ctime",
- /* 230 */ "wal",
- /* 231 */ "fsync",
- /* 232 */ "comp",
- /* 233 */ "prec",
- /* 234 */ "update",
- /* 235 */ "cachelast",
- /* 236 */ "partitions",
- /* 237 */ "signed",
- /* 238 */ "create_table_args",
- /* 239 */ "create_stable_args",
- /* 240 */ "create_table_list",
- /* 241 */ "create_from_stable",
- /* 242 */ "columnlist",
- /* 243 */ "tagitemlist",
- /* 244 */ "tagNamelist",
- /* 245 */ "select",
- /* 246 */ "column",
- /* 247 */ "tagitem",
- /* 248 */ "selcollist",
- /* 249 */ "from",
- /* 250 */ "where_opt",
- /* 251 */ "interval_opt",
- /* 252 */ "sliding_opt",
- /* 253 */ "session_option",
- /* 254 */ "windowstate_option",
- /* 255 */ "fill_opt",
- /* 256 */ "groupby_opt",
- /* 257 */ "having_opt",
- /* 258 */ "orderby_opt",
- /* 259 */ "slimit_opt",
- /* 260 */ "limit_opt",
- /* 261 */ "union",
- /* 262 */ "sclp",
- /* 263 */ "distinct",
- /* 264 */ "expr",
- /* 265 */ "as",
- /* 266 */ "tablelist",
- /* 267 */ "sub",
- /* 268 */ "tmvar",
- /* 269 */ "sortlist",
- /* 270 */ "sortitem",
- /* 271 */ "item",
- /* 272 */ "sortorder",
- /* 273 */ "grouplist",
- /* 274 */ "expritem",
+ /* 22 */ "MATCH",
+ /* 23 */ "NMATCH",
+ /* 24 */ "GLOB",
+ /* 25 */ "BETWEEN",
+ /* 26 */ "IN",
+ /* 27 */ "GT",
+ /* 28 */ "GE",
+ /* 29 */ "LT",
+ /* 30 */ "LE",
+ /* 31 */ "BITAND",
+ /* 32 */ "BITOR",
+ /* 33 */ "LSHIFT",
+ /* 34 */ "RSHIFT",
+ /* 35 */ "PLUS",
+ /* 36 */ "MINUS",
+ /* 37 */ "DIVIDE",
+ /* 38 */ "TIMES",
+ /* 39 */ "STAR",
+ /* 40 */ "SLASH",
+ /* 41 */ "REM",
+ /* 42 */ "CONCAT",
+ /* 43 */ "UMINUS",
+ /* 44 */ "UPLUS",
+ /* 45 */ "BITNOT",
+ /* 46 */ "SHOW",
+ /* 47 */ "DATABASES",
+ /* 48 */ "TOPICS",
+ /* 49 */ "FUNCTIONS",
+ /* 50 */ "MNODES",
+ /* 51 */ "DNODES",
+ /* 52 */ "ACCOUNTS",
+ /* 53 */ "USERS",
+ /* 54 */ "MODULES",
+ /* 55 */ "QUERIES",
+ /* 56 */ "CONNECTIONS",
+ /* 57 */ "STREAMS",
+ /* 58 */ "VARIABLES",
+ /* 59 */ "SCORES",
+ /* 60 */ "GRANTS",
+ /* 61 */ "VNODES",
+ /* 62 */ "DOT",
+ /* 63 */ "CREATE",
+ /* 64 */ "TABLE",
+ /* 65 */ "STABLE",
+ /* 66 */ "DATABASE",
+ /* 67 */ "TABLES",
+ /* 68 */ "STABLES",
+ /* 69 */ "VGROUPS",
+ /* 70 */ "DROP",
+ /* 71 */ "TOPIC",
+ /* 72 */ "FUNCTION",
+ /* 73 */ "DNODE",
+ /* 74 */ "USER",
+ /* 75 */ "ACCOUNT",
+ /* 76 */ "USE",
+ /* 77 */ "DESCRIBE",
+ /* 78 */ "DESC",
+ /* 79 */ "ALTER",
+ /* 80 */ "PASS",
+ /* 81 */ "PRIVILEGE",
+ /* 82 */ "LOCAL",
+ /* 83 */ "COMPACT",
+ /* 84 */ "LP",
+ /* 85 */ "RP",
+ /* 86 */ "IF",
+ /* 87 */ "EXISTS",
+ /* 88 */ "AS",
+ /* 89 */ "OUTPUTTYPE",
+ /* 90 */ "AGGREGATE",
+ /* 91 */ "BUFSIZE",
+ /* 92 */ "PPS",
+ /* 93 */ "TSERIES",
+ /* 94 */ "DBS",
+ /* 95 */ "STORAGE",
+ /* 96 */ "QTIME",
+ /* 97 */ "CONNS",
+ /* 98 */ "STATE",
+ /* 99 */ "COMMA",
+ /* 100 */ "KEEP",
+ /* 101 */ "CACHE",
+ /* 102 */ "REPLICA",
+ /* 103 */ "QUORUM",
+ /* 104 */ "DAYS",
+ /* 105 */ "MINROWS",
+ /* 106 */ "MAXROWS",
+ /* 107 */ "BLOCKS",
+ /* 108 */ "CTIME",
+ /* 109 */ "WAL",
+ /* 110 */ "FSYNC",
+ /* 111 */ "COMP",
+ /* 112 */ "PRECISION",
+ /* 113 */ "UPDATE",
+ /* 114 */ "CACHELAST",
+ /* 115 */ "PARTITIONS",
+ /* 116 */ "UNSIGNED",
+ /* 117 */ "TAGS",
+ /* 118 */ "USING",
+ /* 119 */ "NULL",
+ /* 120 */ "NOW",
+ /* 121 */ "SELECT",
+ /* 122 */ "UNION",
+ /* 123 */ "ALL",
+ /* 124 */ "DISTINCT",
+ /* 125 */ "FROM",
+ /* 126 */ "VARIABLE",
+ /* 127 */ "INTERVAL",
+ /* 128 */ "EVERY",
+ /* 129 */ "SESSION",
+ /* 130 */ "STATE_WINDOW",
+ /* 131 */ "FILL",
+ /* 132 */ "SLIDING",
+ /* 133 */ "ORDER",
+ /* 134 */ "BY",
+ /* 135 */ "ASC",
+ /* 136 */ "GROUP",
+ /* 137 */ "HAVING",
+ /* 138 */ "LIMIT",
+ /* 139 */ "OFFSET",
+ /* 140 */ "SLIMIT",
+ /* 141 */ "SOFFSET",
+ /* 142 */ "WHERE",
+ /* 143 */ "RESET",
+ /* 144 */ "QUERY",
+ /* 145 */ "SYNCDB",
+ /* 146 */ "ADD",
+ /* 147 */ "COLUMN",
+ /* 148 */ "MODIFY",
+ /* 149 */ "TAG",
+ /* 150 */ "CHANGE",
+ /* 151 */ "SET",
+ /* 152 */ "KILL",
+ /* 153 */ "CONNECTION",
+ /* 154 */ "STREAM",
+ /* 155 */ "COLON",
+ /* 156 */ "ABORT",
+ /* 157 */ "AFTER",
+ /* 158 */ "ATTACH",
+ /* 159 */ "BEFORE",
+ /* 160 */ "BEGIN",
+ /* 161 */ "CASCADE",
+ /* 162 */ "CLUSTER",
+ /* 163 */ "CONFLICT",
+ /* 164 */ "COPY",
+ /* 165 */ "DEFERRED",
+ /* 166 */ "DELIMITERS",
+ /* 167 */ "DETACH",
+ /* 168 */ "EACH",
+ /* 169 */ "END",
+ /* 170 */ "EXPLAIN",
+ /* 171 */ "FAIL",
+ /* 172 */ "FOR",
+ /* 173 */ "IGNORE",
+ /* 174 */ "IMMEDIATE",
+ /* 175 */ "INITIALLY",
+ /* 176 */ "INSTEAD",
+ /* 177 */ "KEY",
+ /* 178 */ "OF",
+ /* 179 */ "RAISE",
+ /* 180 */ "REPLACE",
+ /* 181 */ "RESTRICT",
+ /* 182 */ "ROW",
+ /* 183 */ "STATEMENT",
+ /* 184 */ "TRIGGER",
+ /* 185 */ "VIEW",
+ /* 186 */ "IPTOKEN",
+ /* 187 */ "SEMI",
+ /* 188 */ "NONE",
+ /* 189 */ "PREV",
+ /* 190 */ "LINEAR",
+ /* 191 */ "IMPORT",
+ /* 192 */ "TBNAME",
+ /* 193 */ "JOIN",
+ /* 194 */ "INSERT",
+ /* 195 */ "INTO",
+ /* 196 */ "VALUES",
+ /* 197 */ "program",
+ /* 198 */ "cmd",
+ /* 199 */ "ids",
+ /* 200 */ "dbPrefix",
+ /* 201 */ "cpxName",
+ /* 202 */ "ifexists",
+ /* 203 */ "alter_db_optr",
+ /* 204 */ "alter_topic_optr",
+ /* 205 */ "acct_optr",
+ /* 206 */ "exprlist",
+ /* 207 */ "ifnotexists",
+ /* 208 */ "db_optr",
+ /* 209 */ "topic_optr",
+ /* 210 */ "typename",
+ /* 211 */ "bufsize",
+ /* 212 */ "pps",
+ /* 213 */ "tseries",
+ /* 214 */ "dbs",
+ /* 215 */ "streams",
+ /* 216 */ "storage",
+ /* 217 */ "qtime",
+ /* 218 */ "users",
+ /* 219 */ "conns",
+ /* 220 */ "state",
+ /* 221 */ "intitemlist",
+ /* 222 */ "intitem",
+ /* 223 */ "keep",
+ /* 224 */ "cache",
+ /* 225 */ "replica",
+ /* 226 */ "quorum",
+ /* 227 */ "days",
+ /* 228 */ "minrows",
+ /* 229 */ "maxrows",
+ /* 230 */ "blocks",
+ /* 231 */ "ctime",
+ /* 232 */ "wal",
+ /* 233 */ "fsync",
+ /* 234 */ "comp",
+ /* 235 */ "prec",
+ /* 236 */ "update",
+ /* 237 */ "cachelast",
+ /* 238 */ "partitions",
+ /* 239 */ "signed",
+ /* 240 */ "create_table_args",
+ /* 241 */ "create_stable_args",
+ /* 242 */ "create_table_list",
+ /* 243 */ "create_from_stable",
+ /* 244 */ "columnlist",
+ /* 245 */ "tagitemlist",
+ /* 246 */ "tagNamelist",
+ /* 247 */ "select",
+ /* 248 */ "column",
+ /* 249 */ "tagitem",
+ /* 250 */ "selcollist",
+ /* 251 */ "from",
+ /* 252 */ "where_opt",
+ /* 253 */ "interval_option",
+ /* 254 */ "sliding_opt",
+ /* 255 */ "session_option",
+ /* 256 */ "windowstate_option",
+ /* 257 */ "fill_opt",
+ /* 258 */ "groupby_opt",
+ /* 259 */ "having_opt",
+ /* 260 */ "orderby_opt",
+ /* 261 */ "slimit_opt",
+ /* 262 */ "limit_opt",
+ /* 263 */ "union",
+ /* 264 */ "sclp",
+ /* 265 */ "distinct",
+ /* 266 */ "expr",
+ /* 267 */ "as",
+ /* 268 */ "tablelist",
+ /* 269 */ "sub",
+ /* 270 */ "tmvar",
+ /* 271 */ "intervalKey",
+ /* 272 */ "sortlist",
+ /* 273 */ "sortitem",
+ /* 274 */ "item",
+ /* 275 */ "sortorder",
+ /* 276 */ "grouplist",
+ /* 277 */ "expritem",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -1122,255 +1132,260 @@ static const char *const yyRuleName[] = {
/* 37 */ "cmd ::= DROP ACCOUNT ids",
/* 38 */ "cmd ::= USE ids",
/* 39 */ "cmd ::= DESCRIBE ids cpxName",
- /* 40 */ "cmd ::= ALTER USER ids PASS ids",
- /* 41 */ "cmd ::= ALTER USER ids PRIVILEGE ids",
- /* 42 */ "cmd ::= ALTER DNODE ids ids",
- /* 43 */ "cmd ::= ALTER DNODE ids ids ids",
- /* 44 */ "cmd ::= ALTER LOCAL ids",
- /* 45 */ "cmd ::= ALTER LOCAL ids ids",
- /* 46 */ "cmd ::= ALTER DATABASE ids alter_db_optr",
- /* 47 */ "cmd ::= ALTER TOPIC ids alter_topic_optr",
- /* 48 */ "cmd ::= ALTER ACCOUNT ids acct_optr",
- /* 49 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr",
- /* 50 */ "cmd ::= COMPACT VNODES IN LP exprlist RP",
- /* 51 */ "ids ::= ID",
- /* 52 */ "ids ::= STRING",
- /* 53 */ "ifexists ::= IF EXISTS",
- /* 54 */ "ifexists ::=",
- /* 55 */ "ifnotexists ::= IF NOT EXISTS",
- /* 56 */ "ifnotexists ::=",
- /* 57 */ "cmd ::= CREATE DNODE ids",
- /* 58 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr",
- /* 59 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr",
- /* 60 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr",
- /* 61 */ "cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize",
- /* 62 */ "cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize",
- /* 63 */ "cmd ::= CREATE USER ids PASS ids",
- /* 64 */ "bufsize ::=",
- /* 65 */ "bufsize ::= BUFSIZE INTEGER",
- /* 66 */ "pps ::=",
- /* 67 */ "pps ::= PPS INTEGER",
- /* 68 */ "tseries ::=",
- /* 69 */ "tseries ::= TSERIES INTEGER",
- /* 70 */ "dbs ::=",
- /* 71 */ "dbs ::= DBS INTEGER",
- /* 72 */ "streams ::=",
- /* 73 */ "streams ::= STREAMS INTEGER",
- /* 74 */ "storage ::=",
- /* 75 */ "storage ::= STORAGE INTEGER",
- /* 76 */ "qtime ::=",
- /* 77 */ "qtime ::= QTIME INTEGER",
- /* 78 */ "users ::=",
- /* 79 */ "users ::= USERS INTEGER",
- /* 80 */ "conns ::=",
- /* 81 */ "conns ::= CONNS INTEGER",
- /* 82 */ "state ::=",
- /* 83 */ "state ::= STATE ids",
- /* 84 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state",
- /* 85 */ "intitemlist ::= intitemlist COMMA intitem",
- /* 86 */ "intitemlist ::= intitem",
- /* 87 */ "intitem ::= INTEGER",
- /* 88 */ "keep ::= KEEP intitemlist",
- /* 89 */ "cache ::= CACHE INTEGER",
- /* 90 */ "replica ::= REPLICA INTEGER",
- /* 91 */ "quorum ::= QUORUM INTEGER",
- /* 92 */ "days ::= DAYS INTEGER",
- /* 93 */ "minrows ::= MINROWS INTEGER",
- /* 94 */ "maxrows ::= MAXROWS INTEGER",
- /* 95 */ "blocks ::= BLOCKS INTEGER",
- /* 96 */ "ctime ::= CTIME INTEGER",
- /* 97 */ "wal ::= WAL INTEGER",
- /* 98 */ "fsync ::= FSYNC INTEGER",
- /* 99 */ "comp ::= COMP INTEGER",
- /* 100 */ "prec ::= PRECISION STRING",
- /* 101 */ "update ::= UPDATE INTEGER",
- /* 102 */ "cachelast ::= CACHELAST INTEGER",
- /* 103 */ "partitions ::= PARTITIONS INTEGER",
- /* 104 */ "db_optr ::=",
- /* 105 */ "db_optr ::= db_optr cache",
- /* 106 */ "db_optr ::= db_optr replica",
- /* 107 */ "db_optr ::= db_optr quorum",
- /* 108 */ "db_optr ::= db_optr days",
- /* 109 */ "db_optr ::= db_optr minrows",
- /* 110 */ "db_optr ::= db_optr maxrows",
- /* 111 */ "db_optr ::= db_optr blocks",
- /* 112 */ "db_optr ::= db_optr ctime",
- /* 113 */ "db_optr ::= db_optr wal",
- /* 114 */ "db_optr ::= db_optr fsync",
- /* 115 */ "db_optr ::= db_optr comp",
- /* 116 */ "db_optr ::= db_optr prec",
- /* 117 */ "db_optr ::= db_optr keep",
- /* 118 */ "db_optr ::= db_optr update",
- /* 119 */ "db_optr ::= db_optr cachelast",
- /* 120 */ "topic_optr ::= db_optr",
- /* 121 */ "topic_optr ::= topic_optr partitions",
- /* 122 */ "alter_db_optr ::=",
- /* 123 */ "alter_db_optr ::= alter_db_optr replica",
- /* 124 */ "alter_db_optr ::= alter_db_optr quorum",
- /* 125 */ "alter_db_optr ::= alter_db_optr keep",
- /* 126 */ "alter_db_optr ::= alter_db_optr blocks",
- /* 127 */ "alter_db_optr ::= alter_db_optr comp",
- /* 128 */ "alter_db_optr ::= alter_db_optr update",
- /* 129 */ "alter_db_optr ::= alter_db_optr cachelast",
- /* 130 */ "alter_topic_optr ::= alter_db_optr",
- /* 131 */ "alter_topic_optr ::= alter_topic_optr partitions",
- /* 132 */ "typename ::= ids",
- /* 133 */ "typename ::= ids LP signed RP",
- /* 134 */ "typename ::= ids UNSIGNED",
- /* 135 */ "signed ::= INTEGER",
- /* 136 */ "signed ::= PLUS INTEGER",
- /* 137 */ "signed ::= MINUS INTEGER",
- /* 138 */ "cmd ::= CREATE TABLE create_table_args",
- /* 139 */ "cmd ::= CREATE TABLE create_stable_args",
- /* 140 */ "cmd ::= CREATE STABLE create_stable_args",
- /* 141 */ "cmd ::= CREATE TABLE create_table_list",
- /* 142 */ "create_table_list ::= create_from_stable",
- /* 143 */ "create_table_list ::= create_table_list create_from_stable",
- /* 144 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP",
- /* 145 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP",
- /* 146 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP",
- /* 147 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP",
- /* 148 */ "tagNamelist ::= tagNamelist COMMA ids",
- /* 149 */ "tagNamelist ::= ids",
- /* 150 */ "create_table_args ::= ifnotexists ids cpxName AS select",
- /* 151 */ "columnlist ::= columnlist COMMA column",
- /* 152 */ "columnlist ::= column",
- /* 153 */ "column ::= ids typename",
- /* 154 */ "tagitemlist ::= tagitemlist COMMA tagitem",
- /* 155 */ "tagitemlist ::= tagitem",
- /* 156 */ "tagitem ::= INTEGER",
- /* 157 */ "tagitem ::= FLOAT",
- /* 158 */ "tagitem ::= STRING",
- /* 159 */ "tagitem ::= BOOL",
- /* 160 */ "tagitem ::= NULL",
- /* 161 */ "tagitem ::= NOW",
- /* 162 */ "tagitem ::= MINUS INTEGER",
- /* 163 */ "tagitem ::= MINUS FLOAT",
- /* 164 */ "tagitem ::= PLUS INTEGER",
- /* 165 */ "tagitem ::= PLUS FLOAT",
- /* 166 */ "select ::= SELECT selcollist from where_opt interval_opt sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt",
- /* 167 */ "select ::= LP select RP",
- /* 168 */ "union ::= select",
- /* 169 */ "union ::= union UNION ALL select",
- /* 170 */ "cmd ::= union",
- /* 171 */ "select ::= SELECT selcollist",
- /* 172 */ "sclp ::= selcollist COMMA",
- /* 173 */ "sclp ::=",
- /* 174 */ "selcollist ::= sclp distinct expr as",
- /* 175 */ "selcollist ::= sclp STAR",
- /* 176 */ "as ::= AS ids",
- /* 177 */ "as ::= ids",
- /* 178 */ "as ::=",
- /* 179 */ "distinct ::= DISTINCT",
- /* 180 */ "distinct ::=",
- /* 181 */ "from ::= FROM tablelist",
- /* 182 */ "from ::= FROM sub",
- /* 183 */ "sub ::= LP union RP",
- /* 184 */ "sub ::= LP union RP ids",
- /* 185 */ "sub ::= sub COMMA LP union RP ids",
- /* 186 */ "tablelist ::= ids cpxName",
- /* 187 */ "tablelist ::= ids cpxName ids",
- /* 188 */ "tablelist ::= tablelist COMMA ids cpxName",
- /* 189 */ "tablelist ::= tablelist COMMA ids cpxName ids",
- /* 190 */ "tmvar ::= VARIABLE",
- /* 191 */ "interval_opt ::= INTERVAL LP tmvar RP",
- /* 192 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP",
- /* 193 */ "interval_opt ::=",
- /* 194 */ "session_option ::=",
- /* 195 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP",
- /* 196 */ "windowstate_option ::=",
- /* 197 */ "windowstate_option ::= STATE_WINDOW LP ids RP",
- /* 198 */ "fill_opt ::=",
- /* 199 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
- /* 200 */ "fill_opt ::= FILL LP ID RP",
- /* 201 */ "sliding_opt ::= SLIDING LP tmvar RP",
- /* 202 */ "sliding_opt ::=",
- /* 203 */ "orderby_opt ::=",
- /* 204 */ "orderby_opt ::= ORDER BY sortlist",
- /* 205 */ "sortlist ::= sortlist COMMA item sortorder",
- /* 206 */ "sortlist ::= item sortorder",
- /* 207 */ "item ::= ids cpxName",
- /* 208 */ "sortorder ::= ASC",
- /* 209 */ "sortorder ::= DESC",
- /* 210 */ "sortorder ::=",
- /* 211 */ "groupby_opt ::=",
- /* 212 */ "groupby_opt ::= GROUP BY grouplist",
- /* 213 */ "grouplist ::= grouplist COMMA item",
- /* 214 */ "grouplist ::= item",
- /* 215 */ "having_opt ::=",
- /* 216 */ "having_opt ::= HAVING expr",
- /* 217 */ "limit_opt ::=",
- /* 218 */ "limit_opt ::= LIMIT signed",
- /* 219 */ "limit_opt ::= LIMIT signed OFFSET signed",
- /* 220 */ "limit_opt ::= LIMIT signed COMMA signed",
- /* 221 */ "slimit_opt ::=",
- /* 222 */ "slimit_opt ::= SLIMIT signed",
- /* 223 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
- /* 224 */ "slimit_opt ::= SLIMIT signed COMMA signed",
- /* 225 */ "where_opt ::=",
- /* 226 */ "where_opt ::= WHERE expr",
- /* 227 */ "expr ::= LP expr RP",
- /* 228 */ "expr ::= ID",
- /* 229 */ "expr ::= ID DOT ID",
- /* 230 */ "expr ::= ID DOT STAR",
- /* 231 */ "expr ::= INTEGER",
- /* 232 */ "expr ::= MINUS INTEGER",
- /* 233 */ "expr ::= PLUS INTEGER",
- /* 234 */ "expr ::= FLOAT",
- /* 235 */ "expr ::= MINUS FLOAT",
- /* 236 */ "expr ::= PLUS FLOAT",
- /* 237 */ "expr ::= STRING",
- /* 238 */ "expr ::= NOW",
- /* 239 */ "expr ::= VARIABLE",
- /* 240 */ "expr ::= PLUS VARIABLE",
- /* 241 */ "expr ::= MINUS VARIABLE",
- /* 242 */ "expr ::= BOOL",
- /* 243 */ "expr ::= NULL",
- /* 244 */ "expr ::= ID LP exprlist RP",
- /* 245 */ "expr ::= ID LP STAR RP",
- /* 246 */ "expr ::= expr IS NULL",
- /* 247 */ "expr ::= expr IS NOT NULL",
- /* 248 */ "expr ::= expr LT expr",
- /* 249 */ "expr ::= expr GT expr",
- /* 250 */ "expr ::= expr LE expr",
- /* 251 */ "expr ::= expr GE expr",
- /* 252 */ "expr ::= expr NE expr",
- /* 253 */ "expr ::= expr EQ expr",
- /* 254 */ "expr ::= expr BETWEEN expr AND expr",
- /* 255 */ "expr ::= expr AND expr",
- /* 256 */ "expr ::= expr OR expr",
- /* 257 */ "expr ::= expr PLUS expr",
- /* 258 */ "expr ::= expr MINUS expr",
- /* 259 */ "expr ::= expr STAR expr",
- /* 260 */ "expr ::= expr SLASH expr",
- /* 261 */ "expr ::= expr REM expr",
- /* 262 */ "expr ::= expr LIKE expr",
- /* 263 */ "expr ::= expr IN LP exprlist RP",
- /* 264 */ "exprlist ::= exprlist COMMA expritem",
- /* 265 */ "exprlist ::= expritem",
- /* 266 */ "expritem ::= expr",
- /* 267 */ "expritem ::=",
- /* 268 */ "cmd ::= RESET QUERY CACHE",
- /* 269 */ "cmd ::= SYNCDB ids REPLICA",
- /* 270 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
- /* 271 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
- /* 272 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist",
- /* 273 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
- /* 274 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
- /* 275 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
- /* 276 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
- /* 277 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist",
- /* 278 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
- /* 279 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
- /* 280 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist",
- /* 281 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
- /* 282 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
- /* 283 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
- /* 284 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem",
- /* 285 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist",
- /* 286 */ "cmd ::= KILL CONNECTION INTEGER",
- /* 287 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
- /* 288 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
+ /* 40 */ "cmd ::= DESC ids cpxName",
+ /* 41 */ "cmd ::= ALTER USER ids PASS ids",
+ /* 42 */ "cmd ::= ALTER USER ids PRIVILEGE ids",
+ /* 43 */ "cmd ::= ALTER DNODE ids ids",
+ /* 44 */ "cmd ::= ALTER DNODE ids ids ids",
+ /* 45 */ "cmd ::= ALTER LOCAL ids",
+ /* 46 */ "cmd ::= ALTER LOCAL ids ids",
+ /* 47 */ "cmd ::= ALTER DATABASE ids alter_db_optr",
+ /* 48 */ "cmd ::= ALTER TOPIC ids alter_topic_optr",
+ /* 49 */ "cmd ::= ALTER ACCOUNT ids acct_optr",
+ /* 50 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr",
+ /* 51 */ "cmd ::= COMPACT VNODES IN LP exprlist RP",
+ /* 52 */ "ids ::= ID",
+ /* 53 */ "ids ::= STRING",
+ /* 54 */ "ifexists ::= IF EXISTS",
+ /* 55 */ "ifexists ::=",
+ /* 56 */ "ifnotexists ::= IF NOT EXISTS",
+ /* 57 */ "ifnotexists ::=",
+ /* 58 */ "cmd ::= CREATE DNODE ids",
+ /* 59 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr",
+ /* 60 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr",
+ /* 61 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr",
+ /* 62 */ "cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize",
+ /* 63 */ "cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize",
+ /* 64 */ "cmd ::= CREATE USER ids PASS ids",
+ /* 65 */ "bufsize ::=",
+ /* 66 */ "bufsize ::= BUFSIZE INTEGER",
+ /* 67 */ "pps ::=",
+ /* 68 */ "pps ::= PPS INTEGER",
+ /* 69 */ "tseries ::=",
+ /* 70 */ "tseries ::= TSERIES INTEGER",
+ /* 71 */ "dbs ::=",
+ /* 72 */ "dbs ::= DBS INTEGER",
+ /* 73 */ "streams ::=",
+ /* 74 */ "streams ::= STREAMS INTEGER",
+ /* 75 */ "storage ::=",
+ /* 76 */ "storage ::= STORAGE INTEGER",
+ /* 77 */ "qtime ::=",
+ /* 78 */ "qtime ::= QTIME INTEGER",
+ /* 79 */ "users ::=",
+ /* 80 */ "users ::= USERS INTEGER",
+ /* 81 */ "conns ::=",
+ /* 82 */ "conns ::= CONNS INTEGER",
+ /* 83 */ "state ::=",
+ /* 84 */ "state ::= STATE ids",
+ /* 85 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state",
+ /* 86 */ "intitemlist ::= intitemlist COMMA intitem",
+ /* 87 */ "intitemlist ::= intitem",
+ /* 88 */ "intitem ::= INTEGER",
+ /* 89 */ "keep ::= KEEP intitemlist",
+ /* 90 */ "cache ::= CACHE INTEGER",
+ /* 91 */ "replica ::= REPLICA INTEGER",
+ /* 92 */ "quorum ::= QUORUM INTEGER",
+ /* 93 */ "days ::= DAYS INTEGER",
+ /* 94 */ "minrows ::= MINROWS INTEGER",
+ /* 95 */ "maxrows ::= MAXROWS INTEGER",
+ /* 96 */ "blocks ::= BLOCKS INTEGER",
+ /* 97 */ "ctime ::= CTIME INTEGER",
+ /* 98 */ "wal ::= WAL INTEGER",
+ /* 99 */ "fsync ::= FSYNC INTEGER",
+ /* 100 */ "comp ::= COMP INTEGER",
+ /* 101 */ "prec ::= PRECISION STRING",
+ /* 102 */ "update ::= UPDATE INTEGER",
+ /* 103 */ "cachelast ::= CACHELAST INTEGER",
+ /* 104 */ "partitions ::= PARTITIONS INTEGER",
+ /* 105 */ "db_optr ::=",
+ /* 106 */ "db_optr ::= db_optr cache",
+ /* 107 */ "db_optr ::= db_optr replica",
+ /* 108 */ "db_optr ::= db_optr quorum",
+ /* 109 */ "db_optr ::= db_optr days",
+ /* 110 */ "db_optr ::= db_optr minrows",
+ /* 111 */ "db_optr ::= db_optr maxrows",
+ /* 112 */ "db_optr ::= db_optr blocks",
+ /* 113 */ "db_optr ::= db_optr ctime",
+ /* 114 */ "db_optr ::= db_optr wal",
+ /* 115 */ "db_optr ::= db_optr fsync",
+ /* 116 */ "db_optr ::= db_optr comp",
+ /* 117 */ "db_optr ::= db_optr prec",
+ /* 118 */ "db_optr ::= db_optr keep",
+ /* 119 */ "db_optr ::= db_optr update",
+ /* 120 */ "db_optr ::= db_optr cachelast",
+ /* 121 */ "topic_optr ::= db_optr",
+ /* 122 */ "topic_optr ::= topic_optr partitions",
+ /* 123 */ "alter_db_optr ::=",
+ /* 124 */ "alter_db_optr ::= alter_db_optr replica",
+ /* 125 */ "alter_db_optr ::= alter_db_optr quorum",
+ /* 126 */ "alter_db_optr ::= alter_db_optr keep",
+ /* 127 */ "alter_db_optr ::= alter_db_optr blocks",
+ /* 128 */ "alter_db_optr ::= alter_db_optr comp",
+ /* 129 */ "alter_db_optr ::= alter_db_optr update",
+ /* 130 */ "alter_db_optr ::= alter_db_optr cachelast",
+ /* 131 */ "alter_topic_optr ::= alter_db_optr",
+ /* 132 */ "alter_topic_optr ::= alter_topic_optr partitions",
+ /* 133 */ "typename ::= ids",
+ /* 134 */ "typename ::= ids LP signed RP",
+ /* 135 */ "typename ::= ids UNSIGNED",
+ /* 136 */ "signed ::= INTEGER",
+ /* 137 */ "signed ::= PLUS INTEGER",
+ /* 138 */ "signed ::= MINUS INTEGER",
+ /* 139 */ "cmd ::= CREATE TABLE create_table_args",
+ /* 140 */ "cmd ::= CREATE TABLE create_stable_args",
+ /* 141 */ "cmd ::= CREATE STABLE create_stable_args",
+ /* 142 */ "cmd ::= CREATE TABLE create_table_list",
+ /* 143 */ "create_table_list ::= create_from_stable",
+ /* 144 */ "create_table_list ::= create_table_list create_from_stable",
+ /* 145 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP",
+ /* 146 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP",
+ /* 147 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP",
+ /* 148 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP",
+ /* 149 */ "tagNamelist ::= tagNamelist COMMA ids",
+ /* 150 */ "tagNamelist ::= ids",
+ /* 151 */ "create_table_args ::= ifnotexists ids cpxName AS select",
+ /* 152 */ "columnlist ::= columnlist COMMA column",
+ /* 153 */ "columnlist ::= column",
+ /* 154 */ "column ::= ids typename",
+ /* 155 */ "tagitemlist ::= tagitemlist COMMA tagitem",
+ /* 156 */ "tagitemlist ::= tagitem",
+ /* 157 */ "tagitem ::= INTEGER",
+ /* 158 */ "tagitem ::= FLOAT",
+ /* 159 */ "tagitem ::= STRING",
+ /* 160 */ "tagitem ::= BOOL",
+ /* 161 */ "tagitem ::= NULL",
+ /* 162 */ "tagitem ::= NOW",
+ /* 163 */ "tagitem ::= MINUS INTEGER",
+ /* 164 */ "tagitem ::= MINUS FLOAT",
+ /* 165 */ "tagitem ::= PLUS INTEGER",
+ /* 166 */ "tagitem ::= PLUS FLOAT",
+ /* 167 */ "select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt",
+ /* 168 */ "select ::= LP select RP",
+ /* 169 */ "union ::= select",
+ /* 170 */ "union ::= union UNION ALL select",
+ /* 171 */ "cmd ::= union",
+ /* 172 */ "select ::= SELECT selcollist",
+ /* 173 */ "sclp ::= selcollist COMMA",
+ /* 174 */ "sclp ::=",
+ /* 175 */ "selcollist ::= sclp distinct expr as",
+ /* 176 */ "selcollist ::= sclp STAR",
+ /* 177 */ "as ::= AS ids",
+ /* 178 */ "as ::= ids",
+ /* 179 */ "as ::=",
+ /* 180 */ "distinct ::= DISTINCT",
+ /* 181 */ "distinct ::=",
+ /* 182 */ "from ::= FROM tablelist",
+ /* 183 */ "from ::= FROM sub",
+ /* 184 */ "sub ::= LP union RP",
+ /* 185 */ "sub ::= LP union RP ids",
+ /* 186 */ "sub ::= sub COMMA LP union RP ids",
+ /* 187 */ "tablelist ::= ids cpxName",
+ /* 188 */ "tablelist ::= ids cpxName ids",
+ /* 189 */ "tablelist ::= tablelist COMMA ids cpxName",
+ /* 190 */ "tablelist ::= tablelist COMMA ids cpxName ids",
+ /* 191 */ "tmvar ::= VARIABLE",
+ /* 192 */ "interval_option ::= intervalKey LP tmvar RP",
+ /* 193 */ "interval_option ::= intervalKey LP tmvar COMMA tmvar RP",
+ /* 194 */ "interval_option ::=",
+ /* 195 */ "intervalKey ::= INTERVAL",
+ /* 196 */ "intervalKey ::= EVERY",
+ /* 197 */ "session_option ::=",
+ /* 198 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP",
+ /* 199 */ "windowstate_option ::=",
+ /* 200 */ "windowstate_option ::= STATE_WINDOW LP ids RP",
+ /* 201 */ "fill_opt ::=",
+ /* 202 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP",
+ /* 203 */ "fill_opt ::= FILL LP ID RP",
+ /* 204 */ "sliding_opt ::= SLIDING LP tmvar RP",
+ /* 205 */ "sliding_opt ::=",
+ /* 206 */ "orderby_opt ::=",
+ /* 207 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 208 */ "sortlist ::= sortlist COMMA item sortorder",
+ /* 209 */ "sortlist ::= item sortorder",
+ /* 210 */ "item ::= ids cpxName",
+ /* 211 */ "sortorder ::= ASC",
+ /* 212 */ "sortorder ::= DESC",
+ /* 213 */ "sortorder ::=",
+ /* 214 */ "groupby_opt ::=",
+ /* 215 */ "groupby_opt ::= GROUP BY grouplist",
+ /* 216 */ "grouplist ::= grouplist COMMA item",
+ /* 217 */ "grouplist ::= item",
+ /* 218 */ "having_opt ::=",
+ /* 219 */ "having_opt ::= HAVING expr",
+ /* 220 */ "limit_opt ::=",
+ /* 221 */ "limit_opt ::= LIMIT signed",
+ /* 222 */ "limit_opt ::= LIMIT signed OFFSET signed",
+ /* 223 */ "limit_opt ::= LIMIT signed COMMA signed",
+ /* 224 */ "slimit_opt ::=",
+ /* 225 */ "slimit_opt ::= SLIMIT signed",
+ /* 226 */ "slimit_opt ::= SLIMIT signed SOFFSET signed",
+ /* 227 */ "slimit_opt ::= SLIMIT signed COMMA signed",
+ /* 228 */ "where_opt ::=",
+ /* 229 */ "where_opt ::= WHERE expr",
+ /* 230 */ "expr ::= LP expr RP",
+ /* 231 */ "expr ::= ID",
+ /* 232 */ "expr ::= ID DOT ID",
+ /* 233 */ "expr ::= ID DOT STAR",
+ /* 234 */ "expr ::= INTEGER",
+ /* 235 */ "expr ::= MINUS INTEGER",
+ /* 236 */ "expr ::= PLUS INTEGER",
+ /* 237 */ "expr ::= FLOAT",
+ /* 238 */ "expr ::= MINUS FLOAT",
+ /* 239 */ "expr ::= PLUS FLOAT",
+ /* 240 */ "expr ::= STRING",
+ /* 241 */ "expr ::= NOW",
+ /* 242 */ "expr ::= VARIABLE",
+ /* 243 */ "expr ::= PLUS VARIABLE",
+ /* 244 */ "expr ::= MINUS VARIABLE",
+ /* 245 */ "expr ::= BOOL",
+ /* 246 */ "expr ::= NULL",
+ /* 247 */ "expr ::= ID LP exprlist RP",
+ /* 248 */ "expr ::= ID LP STAR RP",
+ /* 249 */ "expr ::= expr IS NULL",
+ /* 250 */ "expr ::= expr IS NOT NULL",
+ /* 251 */ "expr ::= expr LT expr",
+ /* 252 */ "expr ::= expr GT expr",
+ /* 253 */ "expr ::= expr LE expr",
+ /* 254 */ "expr ::= expr GE expr",
+ /* 255 */ "expr ::= expr NE expr",
+ /* 256 */ "expr ::= expr EQ expr",
+ /* 257 */ "expr ::= expr BETWEEN expr AND expr",
+ /* 258 */ "expr ::= expr AND expr",
+ /* 259 */ "expr ::= expr OR expr",
+ /* 260 */ "expr ::= expr PLUS expr",
+ /* 261 */ "expr ::= expr MINUS expr",
+ /* 262 */ "expr ::= expr STAR expr",
+ /* 263 */ "expr ::= expr SLASH expr",
+ /* 264 */ "expr ::= expr REM expr",
+ /* 265 */ "expr ::= expr LIKE expr",
+ /* 266 */ "expr ::= expr MATCH expr",
+ /* 267 */ "expr ::= expr NMATCH expr",
+ /* 268 */ "expr ::= expr IN LP exprlist RP",
+ /* 269 */ "exprlist ::= exprlist COMMA expritem",
+ /* 270 */ "exprlist ::= expritem",
+ /* 271 */ "expritem ::= expr",
+ /* 272 */ "expritem ::=",
+ /* 273 */ "cmd ::= RESET QUERY CACHE",
+ /* 274 */ "cmd ::= SYNCDB ids REPLICA",
+ /* 275 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist",
+ /* 276 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids",
+ /* 277 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist",
+ /* 278 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist",
+ /* 279 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids",
+ /* 280 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids",
+ /* 281 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 282 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist",
+ /* 283 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist",
+ /* 284 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids",
+ /* 285 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist",
+ /* 286 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist",
+ /* 287 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids",
+ /* 288 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids",
+ /* 289 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem",
+ /* 290 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist",
+ /* 291 */ "cmd ::= KILL CONNECTION INTEGER",
+ /* 292 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER",
+ /* 293 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER",
};
#endif /* NDEBUG */
@@ -1496,76 +1511,76 @@ static void yy_destructor(
** inside the C code.
*/
/********* Begin destructor definitions ***************************************/
- case 204: /* exprlist */
- case 248: /* selcollist */
- case 262: /* sclp */
+ case 206: /* exprlist */
+ case 250: /* selcollist */
+ case 264: /* sclp */
{
-#line 750 "sql.y"
-tSqlExprListDestroy((yypminor->yy131));
-#line 1506 "sql.c"
+#line 762 "sql.y"
+tSqlExprListDestroy((yypminor->yy421));
+#line 1521 "sql.c"
}
break;
- case 219: /* intitemlist */
- case 221: /* keep */
- case 242: /* columnlist */
- case 243: /* tagitemlist */
- case 244: /* tagNamelist */
- case 255: /* fill_opt */
- case 256: /* groupby_opt */
- case 258: /* orderby_opt */
- case 269: /* sortlist */
- case 273: /* grouplist */
+ case 221: /* intitemlist */
+ case 223: /* keep */
+ case 244: /* columnlist */
+ case 245: /* tagitemlist */
+ case 246: /* tagNamelist */
+ case 257: /* fill_opt */
+ case 258: /* groupby_opt */
+ case 260: /* orderby_opt */
+ case 272: /* sortlist */
+ case 276: /* grouplist */
{
-#line 253 "sql.y"
-taosArrayDestroy((yypminor->yy131));
-#line 1522 "sql.c"
+#line 256 "sql.y"
+taosArrayDestroy((yypminor->yy421));
+#line 1537 "sql.c"
}
break;
- case 240: /* create_table_list */
+ case 242: /* create_table_list */
{
-#line 361 "sql.y"
-destroyCreateTableSql((yypminor->yy272));
-#line 1529 "sql.c"
+#line 364 "sql.y"
+destroyCreateTableSql((yypminor->yy438));
+#line 1544 "sql.c"
}
break;
- case 245: /* select */
+ case 247: /* select */
{
-#line 481 "sql.y"
-destroySqlNode((yypminor->yy256));
-#line 1536 "sql.c"
+#line 484 "sql.y"
+destroySqlNode((yypminor->yy56));
+#line 1551 "sql.c"
}
break;
- case 249: /* from */
- case 266: /* tablelist */
- case 267: /* sub */
+ case 251: /* from */
+ case 268: /* tablelist */
+ case 269: /* sub */
{
-#line 536 "sql.y"
-destroyRelationInfo((yypminor->yy544));
-#line 1545 "sql.c"
+#line 539 "sql.y"
+destroyRelationInfo((yypminor->yy8));
+#line 1560 "sql.c"
}
break;
- case 250: /* where_opt */
- case 257: /* having_opt */
- case 264: /* expr */
- case 274: /* expritem */
+ case 252: /* where_opt */
+ case 259: /* having_opt */
+ case 266: /* expr */
+ case 277: /* expritem */
{
-#line 683 "sql.y"
-tSqlExprDestroy((yypminor->yy46));
-#line 1555 "sql.c"
+#line 691 "sql.y"
+tSqlExprDestroy((yypminor->yy439));
+#line 1570 "sql.c"
}
break;
- case 261: /* union */
+ case 263: /* union */
{
-#line 489 "sql.y"
-destroyAllSqlNode((yypminor->yy131));
-#line 1562 "sql.c"
+#line 492 "sql.y"
+destroyAllSqlNode((yypminor->yy421));
+#line 1577 "sql.c"
}
break;
- case 270: /* sortitem */
+ case 273: /* sortitem */
{
-#line 616 "sql.y"
-tVariantDestroy(&(yypminor->yy516));
-#line 1569 "sql.c"
+#line 624 "sql.y"
+tVariantDestroy(&(yypminor->yy430));
+#line 1584 "sql.c"
}
break;
/********* End destructor definitions *****************************************/
@@ -1854,295 +1869,300 @@ static void yy_shift(
/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
** of that rule */
static const YYCODETYPE yyRuleInfoLhs[] = {
- 195, /* (0) program ::= cmd */
- 196, /* (1) cmd ::= SHOW DATABASES */
- 196, /* (2) cmd ::= SHOW TOPICS */
- 196, /* (3) cmd ::= SHOW FUNCTIONS */
- 196, /* (4) cmd ::= SHOW MNODES */
- 196, /* (5) cmd ::= SHOW DNODES */
- 196, /* (6) cmd ::= SHOW ACCOUNTS */
- 196, /* (7) cmd ::= SHOW USERS */
- 196, /* (8) cmd ::= SHOW MODULES */
- 196, /* (9) cmd ::= SHOW QUERIES */
- 196, /* (10) cmd ::= SHOW CONNECTIONS */
- 196, /* (11) cmd ::= SHOW STREAMS */
- 196, /* (12) cmd ::= SHOW VARIABLES */
- 196, /* (13) cmd ::= SHOW SCORES */
- 196, /* (14) cmd ::= SHOW GRANTS */
- 196, /* (15) cmd ::= SHOW VNODES */
- 196, /* (16) cmd ::= SHOW VNODES ids */
- 198, /* (17) dbPrefix ::= */
- 198, /* (18) dbPrefix ::= ids DOT */
- 199, /* (19) cpxName ::= */
- 199, /* (20) cpxName ::= DOT ids */
- 196, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */
- 196, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */
- 196, /* (23) cmd ::= SHOW CREATE DATABASE ids */
- 196, /* (24) cmd ::= SHOW dbPrefix TABLES */
- 196, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */
- 196, /* (26) cmd ::= SHOW dbPrefix STABLES */
- 196, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */
- 196, /* (28) cmd ::= SHOW dbPrefix VGROUPS */
- 196, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */
- 196, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */
- 196, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */
- 196, /* (32) cmd ::= DROP DATABASE ifexists ids */
- 196, /* (33) cmd ::= DROP TOPIC ifexists ids */
- 196, /* (34) cmd ::= DROP FUNCTION ids */
- 196, /* (35) cmd ::= DROP DNODE ids */
- 196, /* (36) cmd ::= DROP USER ids */
- 196, /* (37) cmd ::= DROP ACCOUNT ids */
- 196, /* (38) cmd ::= USE ids */
- 196, /* (39) cmd ::= DESCRIBE ids cpxName */
- 196, /* (40) cmd ::= ALTER USER ids PASS ids */
- 196, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */
- 196, /* (42) cmd ::= ALTER DNODE ids ids */
- 196, /* (43) cmd ::= ALTER DNODE ids ids ids */
- 196, /* (44) cmd ::= ALTER LOCAL ids */
- 196, /* (45) cmd ::= ALTER LOCAL ids ids */
- 196, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */
- 196, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */
- 196, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */
- 196, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
- 196, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */
- 197, /* (51) ids ::= ID */
- 197, /* (52) ids ::= STRING */
- 200, /* (53) ifexists ::= IF EXISTS */
- 200, /* (54) ifexists ::= */
- 205, /* (55) ifnotexists ::= IF NOT EXISTS */
- 205, /* (56) ifnotexists ::= */
- 196, /* (57) cmd ::= CREATE DNODE ids */
- 196, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
- 196, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- 196, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
- 196, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- 196, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- 196, /* (63) cmd ::= CREATE USER ids PASS ids */
- 209, /* (64) bufsize ::= */
- 209, /* (65) bufsize ::= BUFSIZE INTEGER */
- 210, /* (66) pps ::= */
- 210, /* (67) pps ::= PPS INTEGER */
- 211, /* (68) tseries ::= */
- 211, /* (69) tseries ::= TSERIES INTEGER */
- 212, /* (70) dbs ::= */
- 212, /* (71) dbs ::= DBS INTEGER */
- 213, /* (72) streams ::= */
- 213, /* (73) streams ::= STREAMS INTEGER */
- 214, /* (74) storage ::= */
- 214, /* (75) storage ::= STORAGE INTEGER */
- 215, /* (76) qtime ::= */
- 215, /* (77) qtime ::= QTIME INTEGER */
- 216, /* (78) users ::= */
- 216, /* (79) users ::= USERS INTEGER */
- 217, /* (80) conns ::= */
- 217, /* (81) conns ::= CONNS INTEGER */
- 218, /* (82) state ::= */
- 218, /* (83) state ::= STATE ids */
- 203, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
- 219, /* (85) intitemlist ::= intitemlist COMMA intitem */
- 219, /* (86) intitemlist ::= intitem */
- 220, /* (87) intitem ::= INTEGER */
- 221, /* (88) keep ::= KEEP intitemlist */
- 222, /* (89) cache ::= CACHE INTEGER */
- 223, /* (90) replica ::= REPLICA INTEGER */
- 224, /* (91) quorum ::= QUORUM INTEGER */
- 225, /* (92) days ::= DAYS INTEGER */
- 226, /* (93) minrows ::= MINROWS INTEGER */
- 227, /* (94) maxrows ::= MAXROWS INTEGER */
- 228, /* (95) blocks ::= BLOCKS INTEGER */
- 229, /* (96) ctime ::= CTIME INTEGER */
- 230, /* (97) wal ::= WAL INTEGER */
- 231, /* (98) fsync ::= FSYNC INTEGER */
- 232, /* (99) comp ::= COMP INTEGER */
- 233, /* (100) prec ::= PRECISION STRING */
- 234, /* (101) update ::= UPDATE INTEGER */
- 235, /* (102) cachelast ::= CACHELAST INTEGER */
- 236, /* (103) partitions ::= PARTITIONS INTEGER */
- 206, /* (104) db_optr ::= */
- 206, /* (105) db_optr ::= db_optr cache */
- 206, /* (106) db_optr ::= db_optr replica */
- 206, /* (107) db_optr ::= db_optr quorum */
- 206, /* (108) db_optr ::= db_optr days */
- 206, /* (109) db_optr ::= db_optr minrows */
- 206, /* (110) db_optr ::= db_optr maxrows */
- 206, /* (111) db_optr ::= db_optr blocks */
- 206, /* (112) db_optr ::= db_optr ctime */
- 206, /* (113) db_optr ::= db_optr wal */
- 206, /* (114) db_optr ::= db_optr fsync */
- 206, /* (115) db_optr ::= db_optr comp */
- 206, /* (116) db_optr ::= db_optr prec */
- 206, /* (117) db_optr ::= db_optr keep */
- 206, /* (118) db_optr ::= db_optr update */
- 206, /* (119) db_optr ::= db_optr cachelast */
- 207, /* (120) topic_optr ::= db_optr */
- 207, /* (121) topic_optr ::= topic_optr partitions */
- 201, /* (122) alter_db_optr ::= */
- 201, /* (123) alter_db_optr ::= alter_db_optr replica */
- 201, /* (124) alter_db_optr ::= alter_db_optr quorum */
- 201, /* (125) alter_db_optr ::= alter_db_optr keep */
- 201, /* (126) alter_db_optr ::= alter_db_optr blocks */
- 201, /* (127) alter_db_optr ::= alter_db_optr comp */
- 201, /* (128) alter_db_optr ::= alter_db_optr update */
- 201, /* (129) alter_db_optr ::= alter_db_optr cachelast */
- 202, /* (130) alter_topic_optr ::= alter_db_optr */
- 202, /* (131) alter_topic_optr ::= alter_topic_optr partitions */
- 208, /* (132) typename ::= ids */
- 208, /* (133) typename ::= ids LP signed RP */
- 208, /* (134) typename ::= ids UNSIGNED */
- 237, /* (135) signed ::= INTEGER */
- 237, /* (136) signed ::= PLUS INTEGER */
- 237, /* (137) signed ::= MINUS INTEGER */
- 196, /* (138) cmd ::= CREATE TABLE create_table_args */
- 196, /* (139) cmd ::= CREATE TABLE create_stable_args */
- 196, /* (140) cmd ::= CREATE STABLE create_stable_args */
- 196, /* (141) cmd ::= CREATE TABLE create_table_list */
- 240, /* (142) create_table_list ::= create_from_stable */
- 240, /* (143) create_table_list ::= create_table_list create_from_stable */
- 238, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
- 239, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
- 241, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
- 241, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
- 244, /* (148) tagNamelist ::= tagNamelist COMMA ids */
- 244, /* (149) tagNamelist ::= ids */
- 238, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */
- 242, /* (151) columnlist ::= columnlist COMMA column */
- 242, /* (152) columnlist ::= column */
- 246, /* (153) column ::= ids typename */
- 243, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */
- 243, /* (155) tagitemlist ::= tagitem */
- 247, /* (156) tagitem ::= INTEGER */
- 247, /* (157) tagitem ::= FLOAT */
- 247, /* (158) tagitem ::= STRING */
- 247, /* (159) tagitem ::= BOOL */
- 247, /* (160) tagitem ::= NULL */
- 247, /* (161) tagitem ::= NOW */
- 247, /* (162) tagitem ::= MINUS INTEGER */
- 247, /* (163) tagitem ::= MINUS FLOAT */
- 247, /* (164) tagitem ::= PLUS INTEGER */
- 247, /* (165) tagitem ::= PLUS FLOAT */
- 245, /* (166) select ::= SELECT selcollist from where_opt interval_opt sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
- 245, /* (167) select ::= LP select RP */
- 261, /* (168) union ::= select */
- 261, /* (169) union ::= union UNION ALL select */
- 196, /* (170) cmd ::= union */
- 245, /* (171) select ::= SELECT selcollist */
- 262, /* (172) sclp ::= selcollist COMMA */
- 262, /* (173) sclp ::= */
- 248, /* (174) selcollist ::= sclp distinct expr as */
- 248, /* (175) selcollist ::= sclp STAR */
- 265, /* (176) as ::= AS ids */
- 265, /* (177) as ::= ids */
- 265, /* (178) as ::= */
- 263, /* (179) distinct ::= DISTINCT */
- 263, /* (180) distinct ::= */
- 249, /* (181) from ::= FROM tablelist */
- 249, /* (182) from ::= FROM sub */
- 267, /* (183) sub ::= LP union RP */
- 267, /* (184) sub ::= LP union RP ids */
- 267, /* (185) sub ::= sub COMMA LP union RP ids */
- 266, /* (186) tablelist ::= ids cpxName */
- 266, /* (187) tablelist ::= ids cpxName ids */
- 266, /* (188) tablelist ::= tablelist COMMA ids cpxName */
- 266, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */
- 268, /* (190) tmvar ::= VARIABLE */
- 251, /* (191) interval_opt ::= INTERVAL LP tmvar RP */
- 251, /* (192) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
- 251, /* (193) interval_opt ::= */
- 253, /* (194) session_option ::= */
- 253, /* (195) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
- 254, /* (196) windowstate_option ::= */
- 254, /* (197) windowstate_option ::= STATE_WINDOW LP ids RP */
- 255, /* (198) fill_opt ::= */
- 255, /* (199) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- 255, /* (200) fill_opt ::= FILL LP ID RP */
- 252, /* (201) sliding_opt ::= SLIDING LP tmvar RP */
- 252, /* (202) sliding_opt ::= */
- 258, /* (203) orderby_opt ::= */
- 258, /* (204) orderby_opt ::= ORDER BY sortlist */
- 269, /* (205) sortlist ::= sortlist COMMA item sortorder */
- 269, /* (206) sortlist ::= item sortorder */
- 271, /* (207) item ::= ids cpxName */
- 272, /* (208) sortorder ::= ASC */
- 272, /* (209) sortorder ::= DESC */
- 272, /* (210) sortorder ::= */
- 256, /* (211) groupby_opt ::= */
- 256, /* (212) groupby_opt ::= GROUP BY grouplist */
- 273, /* (213) grouplist ::= grouplist COMMA item */
- 273, /* (214) grouplist ::= item */
- 257, /* (215) having_opt ::= */
- 257, /* (216) having_opt ::= HAVING expr */
- 260, /* (217) limit_opt ::= */
- 260, /* (218) limit_opt ::= LIMIT signed */
- 260, /* (219) limit_opt ::= LIMIT signed OFFSET signed */
- 260, /* (220) limit_opt ::= LIMIT signed COMMA signed */
- 259, /* (221) slimit_opt ::= */
- 259, /* (222) slimit_opt ::= SLIMIT signed */
- 259, /* (223) slimit_opt ::= SLIMIT signed SOFFSET signed */
- 259, /* (224) slimit_opt ::= SLIMIT signed COMMA signed */
- 250, /* (225) where_opt ::= */
- 250, /* (226) where_opt ::= WHERE expr */
- 264, /* (227) expr ::= LP expr RP */
- 264, /* (228) expr ::= ID */
- 264, /* (229) expr ::= ID DOT ID */
- 264, /* (230) expr ::= ID DOT STAR */
- 264, /* (231) expr ::= INTEGER */
- 264, /* (232) expr ::= MINUS INTEGER */
- 264, /* (233) expr ::= PLUS INTEGER */
- 264, /* (234) expr ::= FLOAT */
- 264, /* (235) expr ::= MINUS FLOAT */
- 264, /* (236) expr ::= PLUS FLOAT */
- 264, /* (237) expr ::= STRING */
- 264, /* (238) expr ::= NOW */
- 264, /* (239) expr ::= VARIABLE */
- 264, /* (240) expr ::= PLUS VARIABLE */
- 264, /* (241) expr ::= MINUS VARIABLE */
- 264, /* (242) expr ::= BOOL */
- 264, /* (243) expr ::= NULL */
- 264, /* (244) expr ::= ID LP exprlist RP */
- 264, /* (245) expr ::= ID LP STAR RP */
- 264, /* (246) expr ::= expr IS NULL */
- 264, /* (247) expr ::= expr IS NOT NULL */
- 264, /* (248) expr ::= expr LT expr */
- 264, /* (249) expr ::= expr GT expr */
- 264, /* (250) expr ::= expr LE expr */
- 264, /* (251) expr ::= expr GE expr */
- 264, /* (252) expr ::= expr NE expr */
- 264, /* (253) expr ::= expr EQ expr */
- 264, /* (254) expr ::= expr BETWEEN expr AND expr */
- 264, /* (255) expr ::= expr AND expr */
- 264, /* (256) expr ::= expr OR expr */
- 264, /* (257) expr ::= expr PLUS expr */
- 264, /* (258) expr ::= expr MINUS expr */
- 264, /* (259) expr ::= expr STAR expr */
- 264, /* (260) expr ::= expr SLASH expr */
- 264, /* (261) expr ::= expr REM expr */
- 264, /* (262) expr ::= expr LIKE expr */
- 264, /* (263) expr ::= expr IN LP exprlist RP */
- 204, /* (264) exprlist ::= exprlist COMMA expritem */
- 204, /* (265) exprlist ::= expritem */
- 274, /* (266) expritem ::= expr */
- 274, /* (267) expritem ::= */
- 196, /* (268) cmd ::= RESET QUERY CACHE */
- 196, /* (269) cmd ::= SYNCDB ids REPLICA */
- 196, /* (270) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- 196, /* (271) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- 196, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
- 196, /* (273) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- 196, /* (274) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- 196, /* (275) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- 196, /* (276) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- 196, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
- 196, /* (278) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- 196, /* (279) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- 196, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
- 196, /* (281) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- 196, /* (282) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- 196, /* (283) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- 196, /* (284) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
- 196, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
- 196, /* (286) cmd ::= KILL CONNECTION INTEGER */
- 196, /* (287) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- 196, /* (288) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ 197, /* (0) program ::= cmd */
+ 198, /* (1) cmd ::= SHOW DATABASES */
+ 198, /* (2) cmd ::= SHOW TOPICS */
+ 198, /* (3) cmd ::= SHOW FUNCTIONS */
+ 198, /* (4) cmd ::= SHOW MNODES */
+ 198, /* (5) cmd ::= SHOW DNODES */
+ 198, /* (6) cmd ::= SHOW ACCOUNTS */
+ 198, /* (7) cmd ::= SHOW USERS */
+ 198, /* (8) cmd ::= SHOW MODULES */
+ 198, /* (9) cmd ::= SHOW QUERIES */
+ 198, /* (10) cmd ::= SHOW CONNECTIONS */
+ 198, /* (11) cmd ::= SHOW STREAMS */
+ 198, /* (12) cmd ::= SHOW VARIABLES */
+ 198, /* (13) cmd ::= SHOW SCORES */
+ 198, /* (14) cmd ::= SHOW GRANTS */
+ 198, /* (15) cmd ::= SHOW VNODES */
+ 198, /* (16) cmd ::= SHOW VNODES ids */
+ 200, /* (17) dbPrefix ::= */
+ 200, /* (18) dbPrefix ::= ids DOT */
+ 201, /* (19) cpxName ::= */
+ 201, /* (20) cpxName ::= DOT ids */
+ 198, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */
+ 198, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */
+ 198, /* (23) cmd ::= SHOW CREATE DATABASE ids */
+ 198, /* (24) cmd ::= SHOW dbPrefix TABLES */
+ 198, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */
+ 198, /* (26) cmd ::= SHOW dbPrefix STABLES */
+ 198, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */
+ 198, /* (28) cmd ::= SHOW dbPrefix VGROUPS */
+ 198, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */
+ 198, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */
+ 198, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */
+ 198, /* (32) cmd ::= DROP DATABASE ifexists ids */
+ 198, /* (33) cmd ::= DROP TOPIC ifexists ids */
+ 198, /* (34) cmd ::= DROP FUNCTION ids */
+ 198, /* (35) cmd ::= DROP DNODE ids */
+ 198, /* (36) cmd ::= DROP USER ids */
+ 198, /* (37) cmd ::= DROP ACCOUNT ids */
+ 198, /* (38) cmd ::= USE ids */
+ 198, /* (39) cmd ::= DESCRIBE ids cpxName */
+ 198, /* (40) cmd ::= DESC ids cpxName */
+ 198, /* (41) cmd ::= ALTER USER ids PASS ids */
+ 198, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */
+ 198, /* (43) cmd ::= ALTER DNODE ids ids */
+ 198, /* (44) cmd ::= ALTER DNODE ids ids ids */
+ 198, /* (45) cmd ::= ALTER LOCAL ids */
+ 198, /* (46) cmd ::= ALTER LOCAL ids ids */
+ 198, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */
+ 198, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */
+ 198, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */
+ 198, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+ 198, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */
+ 199, /* (52) ids ::= ID */
+ 199, /* (53) ids ::= STRING */
+ 202, /* (54) ifexists ::= IF EXISTS */
+ 202, /* (55) ifexists ::= */
+ 207, /* (56) ifnotexists ::= IF NOT EXISTS */
+ 207, /* (57) ifnotexists ::= */
+ 198, /* (58) cmd ::= CREATE DNODE ids */
+ 198, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
+ 198, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ 198, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
+ 198, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ 198, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ 198, /* (64) cmd ::= CREATE USER ids PASS ids */
+ 211, /* (65) bufsize ::= */
+ 211, /* (66) bufsize ::= BUFSIZE INTEGER */
+ 212, /* (67) pps ::= */
+ 212, /* (68) pps ::= PPS INTEGER */
+ 213, /* (69) tseries ::= */
+ 213, /* (70) tseries ::= TSERIES INTEGER */
+ 214, /* (71) dbs ::= */
+ 214, /* (72) dbs ::= DBS INTEGER */
+ 215, /* (73) streams ::= */
+ 215, /* (74) streams ::= STREAMS INTEGER */
+ 216, /* (75) storage ::= */
+ 216, /* (76) storage ::= STORAGE INTEGER */
+ 217, /* (77) qtime ::= */
+ 217, /* (78) qtime ::= QTIME INTEGER */
+ 218, /* (79) users ::= */
+ 218, /* (80) users ::= USERS INTEGER */
+ 219, /* (81) conns ::= */
+ 219, /* (82) conns ::= CONNS INTEGER */
+ 220, /* (83) state ::= */
+ 220, /* (84) state ::= STATE ids */
+ 205, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+ 221, /* (86) intitemlist ::= intitemlist COMMA intitem */
+ 221, /* (87) intitemlist ::= intitem */
+ 222, /* (88) intitem ::= INTEGER */
+ 223, /* (89) keep ::= KEEP intitemlist */
+ 224, /* (90) cache ::= CACHE INTEGER */
+ 225, /* (91) replica ::= REPLICA INTEGER */
+ 226, /* (92) quorum ::= QUORUM INTEGER */
+ 227, /* (93) days ::= DAYS INTEGER */
+ 228, /* (94) minrows ::= MINROWS INTEGER */
+ 229, /* (95) maxrows ::= MAXROWS INTEGER */
+ 230, /* (96) blocks ::= BLOCKS INTEGER */
+ 231, /* (97) ctime ::= CTIME INTEGER */
+ 232, /* (98) wal ::= WAL INTEGER */
+ 233, /* (99) fsync ::= FSYNC INTEGER */
+ 234, /* (100) comp ::= COMP INTEGER */
+ 235, /* (101) prec ::= PRECISION STRING */
+ 236, /* (102) update ::= UPDATE INTEGER */
+ 237, /* (103) cachelast ::= CACHELAST INTEGER */
+ 238, /* (104) partitions ::= PARTITIONS INTEGER */
+ 208, /* (105) db_optr ::= */
+ 208, /* (106) db_optr ::= db_optr cache */
+ 208, /* (107) db_optr ::= db_optr replica */
+ 208, /* (108) db_optr ::= db_optr quorum */
+ 208, /* (109) db_optr ::= db_optr days */
+ 208, /* (110) db_optr ::= db_optr minrows */
+ 208, /* (111) db_optr ::= db_optr maxrows */
+ 208, /* (112) db_optr ::= db_optr blocks */
+ 208, /* (113) db_optr ::= db_optr ctime */
+ 208, /* (114) db_optr ::= db_optr wal */
+ 208, /* (115) db_optr ::= db_optr fsync */
+ 208, /* (116) db_optr ::= db_optr comp */
+ 208, /* (117) db_optr ::= db_optr prec */
+ 208, /* (118) db_optr ::= db_optr keep */
+ 208, /* (119) db_optr ::= db_optr update */
+ 208, /* (120) db_optr ::= db_optr cachelast */
+ 209, /* (121) topic_optr ::= db_optr */
+ 209, /* (122) topic_optr ::= topic_optr partitions */
+ 203, /* (123) alter_db_optr ::= */
+ 203, /* (124) alter_db_optr ::= alter_db_optr replica */
+ 203, /* (125) alter_db_optr ::= alter_db_optr quorum */
+ 203, /* (126) alter_db_optr ::= alter_db_optr keep */
+ 203, /* (127) alter_db_optr ::= alter_db_optr blocks */
+ 203, /* (128) alter_db_optr ::= alter_db_optr comp */
+ 203, /* (129) alter_db_optr ::= alter_db_optr update */
+ 203, /* (130) alter_db_optr ::= alter_db_optr cachelast */
+ 204, /* (131) alter_topic_optr ::= alter_db_optr */
+ 204, /* (132) alter_topic_optr ::= alter_topic_optr partitions */
+ 210, /* (133) typename ::= ids */
+ 210, /* (134) typename ::= ids LP signed RP */
+ 210, /* (135) typename ::= ids UNSIGNED */
+ 239, /* (136) signed ::= INTEGER */
+ 239, /* (137) signed ::= PLUS INTEGER */
+ 239, /* (138) signed ::= MINUS INTEGER */
+ 198, /* (139) cmd ::= CREATE TABLE create_table_args */
+ 198, /* (140) cmd ::= CREATE TABLE create_stable_args */
+ 198, /* (141) cmd ::= CREATE STABLE create_stable_args */
+ 198, /* (142) cmd ::= CREATE TABLE create_table_list */
+ 242, /* (143) create_table_list ::= create_from_stable */
+ 242, /* (144) create_table_list ::= create_table_list create_from_stable */
+ 240, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ 241, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ 243, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ 243, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+ 246, /* (149) tagNamelist ::= tagNamelist COMMA ids */
+ 246, /* (150) tagNamelist ::= ids */
+ 240, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */
+ 244, /* (152) columnlist ::= columnlist COMMA column */
+ 244, /* (153) columnlist ::= column */
+ 248, /* (154) column ::= ids typename */
+ 245, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */
+ 245, /* (156) tagitemlist ::= tagitem */
+ 249, /* (157) tagitem ::= INTEGER */
+ 249, /* (158) tagitem ::= FLOAT */
+ 249, /* (159) tagitem ::= STRING */
+ 249, /* (160) tagitem ::= BOOL */
+ 249, /* (161) tagitem ::= NULL */
+ 249, /* (162) tagitem ::= NOW */
+ 249, /* (163) tagitem ::= MINUS INTEGER */
+ 249, /* (164) tagitem ::= MINUS FLOAT */
+ 249, /* (165) tagitem ::= PLUS INTEGER */
+ 249, /* (166) tagitem ::= PLUS FLOAT */
+ 247, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
+ 247, /* (168) select ::= LP select RP */
+ 263, /* (169) union ::= select */
+ 263, /* (170) union ::= union UNION ALL select */
+ 198, /* (171) cmd ::= union */
+ 247, /* (172) select ::= SELECT selcollist */
+ 264, /* (173) sclp ::= selcollist COMMA */
+ 264, /* (174) sclp ::= */
+ 250, /* (175) selcollist ::= sclp distinct expr as */
+ 250, /* (176) selcollist ::= sclp STAR */
+ 267, /* (177) as ::= AS ids */
+ 267, /* (178) as ::= ids */
+ 267, /* (179) as ::= */
+ 265, /* (180) distinct ::= DISTINCT */
+ 265, /* (181) distinct ::= */
+ 251, /* (182) from ::= FROM tablelist */
+ 251, /* (183) from ::= FROM sub */
+ 269, /* (184) sub ::= LP union RP */
+ 269, /* (185) sub ::= LP union RP ids */
+ 269, /* (186) sub ::= sub COMMA LP union RP ids */
+ 268, /* (187) tablelist ::= ids cpxName */
+ 268, /* (188) tablelist ::= ids cpxName ids */
+ 268, /* (189) tablelist ::= tablelist COMMA ids cpxName */
+ 268, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */
+ 270, /* (191) tmvar ::= VARIABLE */
+ 253, /* (192) interval_option ::= intervalKey LP tmvar RP */
+ 253, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
+ 253, /* (194) interval_option ::= */
+ 271, /* (195) intervalKey ::= INTERVAL */
+ 271, /* (196) intervalKey ::= EVERY */
+ 255, /* (197) session_option ::= */
+ 255, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+ 256, /* (199) windowstate_option ::= */
+ 256, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */
+ 257, /* (201) fill_opt ::= */
+ 257, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ 257, /* (203) fill_opt ::= FILL LP ID RP */
+ 254, /* (204) sliding_opt ::= SLIDING LP tmvar RP */
+ 254, /* (205) sliding_opt ::= */
+ 260, /* (206) orderby_opt ::= */
+ 260, /* (207) orderby_opt ::= ORDER BY sortlist */
+ 272, /* (208) sortlist ::= sortlist COMMA item sortorder */
+ 272, /* (209) sortlist ::= item sortorder */
+ 274, /* (210) item ::= ids cpxName */
+ 275, /* (211) sortorder ::= ASC */
+ 275, /* (212) sortorder ::= DESC */
+ 275, /* (213) sortorder ::= */
+ 258, /* (214) groupby_opt ::= */
+ 258, /* (215) groupby_opt ::= GROUP BY grouplist */
+ 276, /* (216) grouplist ::= grouplist COMMA item */
+ 276, /* (217) grouplist ::= item */
+ 259, /* (218) having_opt ::= */
+ 259, /* (219) having_opt ::= HAVING expr */
+ 262, /* (220) limit_opt ::= */
+ 262, /* (221) limit_opt ::= LIMIT signed */
+ 262, /* (222) limit_opt ::= LIMIT signed OFFSET signed */
+ 262, /* (223) limit_opt ::= LIMIT signed COMMA signed */
+ 261, /* (224) slimit_opt ::= */
+ 261, /* (225) slimit_opt ::= SLIMIT signed */
+ 261, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ 261, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */
+ 252, /* (228) where_opt ::= */
+ 252, /* (229) where_opt ::= WHERE expr */
+ 266, /* (230) expr ::= LP expr RP */
+ 266, /* (231) expr ::= ID */
+ 266, /* (232) expr ::= ID DOT ID */
+ 266, /* (233) expr ::= ID DOT STAR */
+ 266, /* (234) expr ::= INTEGER */
+ 266, /* (235) expr ::= MINUS INTEGER */
+ 266, /* (236) expr ::= PLUS INTEGER */
+ 266, /* (237) expr ::= FLOAT */
+ 266, /* (238) expr ::= MINUS FLOAT */
+ 266, /* (239) expr ::= PLUS FLOAT */
+ 266, /* (240) expr ::= STRING */
+ 266, /* (241) expr ::= NOW */
+ 266, /* (242) expr ::= VARIABLE */
+ 266, /* (243) expr ::= PLUS VARIABLE */
+ 266, /* (244) expr ::= MINUS VARIABLE */
+ 266, /* (245) expr ::= BOOL */
+ 266, /* (246) expr ::= NULL */
+ 266, /* (247) expr ::= ID LP exprlist RP */
+ 266, /* (248) expr ::= ID LP STAR RP */
+ 266, /* (249) expr ::= expr IS NULL */
+ 266, /* (250) expr ::= expr IS NOT NULL */
+ 266, /* (251) expr ::= expr LT expr */
+ 266, /* (252) expr ::= expr GT expr */
+ 266, /* (253) expr ::= expr LE expr */
+ 266, /* (254) expr ::= expr GE expr */
+ 266, /* (255) expr ::= expr NE expr */
+ 266, /* (256) expr ::= expr EQ expr */
+ 266, /* (257) expr ::= expr BETWEEN expr AND expr */
+ 266, /* (258) expr ::= expr AND expr */
+ 266, /* (259) expr ::= expr OR expr */
+ 266, /* (260) expr ::= expr PLUS expr */
+ 266, /* (261) expr ::= expr MINUS expr */
+ 266, /* (262) expr ::= expr STAR expr */
+ 266, /* (263) expr ::= expr SLASH expr */
+ 266, /* (264) expr ::= expr REM expr */
+ 266, /* (265) expr ::= expr LIKE expr */
+ 266, /* (266) expr ::= expr MATCH expr */
+ 266, /* (267) expr ::= expr NMATCH expr */
+ 266, /* (268) expr ::= expr IN LP exprlist RP */
+ 206, /* (269) exprlist ::= exprlist COMMA expritem */
+ 206, /* (270) exprlist ::= expritem */
+ 277, /* (271) expritem ::= expr */
+ 277, /* (272) expritem ::= */
+ 198, /* (273) cmd ::= RESET QUERY CACHE */
+ 198, /* (274) cmd ::= SYNCDB ids REPLICA */
+ 198, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ 198, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ 198, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+ 198, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ 198, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ 198, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ 198, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ 198, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+ 198, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ 198, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ 198, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+ 198, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ 198, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ 198, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ 198, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+ 198, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+ 198, /* (291) cmd ::= KILL CONNECTION INTEGER */
+ 198, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ 198, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -2188,255 +2208,260 @@ static const signed char yyRuleInfoNRhs[] = {
-3, /* (37) cmd ::= DROP ACCOUNT ids */
-2, /* (38) cmd ::= USE ids */
-3, /* (39) cmd ::= DESCRIBE ids cpxName */
- -5, /* (40) cmd ::= ALTER USER ids PASS ids */
- -5, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */
- -4, /* (42) cmd ::= ALTER DNODE ids ids */
- -5, /* (43) cmd ::= ALTER DNODE ids ids ids */
- -3, /* (44) cmd ::= ALTER LOCAL ids */
- -4, /* (45) cmd ::= ALTER LOCAL ids ids */
- -4, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */
- -4, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */
- -4, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */
- -6, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
- -6, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */
- -1, /* (51) ids ::= ID */
- -1, /* (52) ids ::= STRING */
- -2, /* (53) ifexists ::= IF EXISTS */
- 0, /* (54) ifexists ::= */
- -3, /* (55) ifnotexists ::= IF NOT EXISTS */
- 0, /* (56) ifnotexists ::= */
- -3, /* (57) cmd ::= CREATE DNODE ids */
- -6, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
- -5, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- -5, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
- -8, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- -9, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- -5, /* (63) cmd ::= CREATE USER ids PASS ids */
- 0, /* (64) bufsize ::= */
- -2, /* (65) bufsize ::= BUFSIZE INTEGER */
- 0, /* (66) pps ::= */
- -2, /* (67) pps ::= PPS INTEGER */
- 0, /* (68) tseries ::= */
- -2, /* (69) tseries ::= TSERIES INTEGER */
- 0, /* (70) dbs ::= */
- -2, /* (71) dbs ::= DBS INTEGER */
- 0, /* (72) streams ::= */
- -2, /* (73) streams ::= STREAMS INTEGER */
- 0, /* (74) storage ::= */
- -2, /* (75) storage ::= STORAGE INTEGER */
- 0, /* (76) qtime ::= */
- -2, /* (77) qtime ::= QTIME INTEGER */
- 0, /* (78) users ::= */
- -2, /* (79) users ::= USERS INTEGER */
- 0, /* (80) conns ::= */
- -2, /* (81) conns ::= CONNS INTEGER */
- 0, /* (82) state ::= */
- -2, /* (83) state ::= STATE ids */
- -9, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
- -3, /* (85) intitemlist ::= intitemlist COMMA intitem */
- -1, /* (86) intitemlist ::= intitem */
- -1, /* (87) intitem ::= INTEGER */
- -2, /* (88) keep ::= KEEP intitemlist */
- -2, /* (89) cache ::= CACHE INTEGER */
- -2, /* (90) replica ::= REPLICA INTEGER */
- -2, /* (91) quorum ::= QUORUM INTEGER */
- -2, /* (92) days ::= DAYS INTEGER */
- -2, /* (93) minrows ::= MINROWS INTEGER */
- -2, /* (94) maxrows ::= MAXROWS INTEGER */
- -2, /* (95) blocks ::= BLOCKS INTEGER */
- -2, /* (96) ctime ::= CTIME INTEGER */
- -2, /* (97) wal ::= WAL INTEGER */
- -2, /* (98) fsync ::= FSYNC INTEGER */
- -2, /* (99) comp ::= COMP INTEGER */
- -2, /* (100) prec ::= PRECISION STRING */
- -2, /* (101) update ::= UPDATE INTEGER */
- -2, /* (102) cachelast ::= CACHELAST INTEGER */
- -2, /* (103) partitions ::= PARTITIONS INTEGER */
- 0, /* (104) db_optr ::= */
- -2, /* (105) db_optr ::= db_optr cache */
- -2, /* (106) db_optr ::= db_optr replica */
- -2, /* (107) db_optr ::= db_optr quorum */
- -2, /* (108) db_optr ::= db_optr days */
- -2, /* (109) db_optr ::= db_optr minrows */
- -2, /* (110) db_optr ::= db_optr maxrows */
- -2, /* (111) db_optr ::= db_optr blocks */
- -2, /* (112) db_optr ::= db_optr ctime */
- -2, /* (113) db_optr ::= db_optr wal */
- -2, /* (114) db_optr ::= db_optr fsync */
- -2, /* (115) db_optr ::= db_optr comp */
- -2, /* (116) db_optr ::= db_optr prec */
- -2, /* (117) db_optr ::= db_optr keep */
- -2, /* (118) db_optr ::= db_optr update */
- -2, /* (119) db_optr ::= db_optr cachelast */
- -1, /* (120) topic_optr ::= db_optr */
- -2, /* (121) topic_optr ::= topic_optr partitions */
- 0, /* (122) alter_db_optr ::= */
- -2, /* (123) alter_db_optr ::= alter_db_optr replica */
- -2, /* (124) alter_db_optr ::= alter_db_optr quorum */
- -2, /* (125) alter_db_optr ::= alter_db_optr keep */
- -2, /* (126) alter_db_optr ::= alter_db_optr blocks */
- -2, /* (127) alter_db_optr ::= alter_db_optr comp */
- -2, /* (128) alter_db_optr ::= alter_db_optr update */
- -2, /* (129) alter_db_optr ::= alter_db_optr cachelast */
- -1, /* (130) alter_topic_optr ::= alter_db_optr */
- -2, /* (131) alter_topic_optr ::= alter_topic_optr partitions */
- -1, /* (132) typename ::= ids */
- -4, /* (133) typename ::= ids LP signed RP */
- -2, /* (134) typename ::= ids UNSIGNED */
- -1, /* (135) signed ::= INTEGER */
- -2, /* (136) signed ::= PLUS INTEGER */
- -2, /* (137) signed ::= MINUS INTEGER */
- -3, /* (138) cmd ::= CREATE TABLE create_table_args */
- -3, /* (139) cmd ::= CREATE TABLE create_stable_args */
- -3, /* (140) cmd ::= CREATE STABLE create_stable_args */
- -3, /* (141) cmd ::= CREATE TABLE create_table_list */
- -1, /* (142) create_table_list ::= create_from_stable */
- -2, /* (143) create_table_list ::= create_table_list create_from_stable */
- -6, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
- -10, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
- -10, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
- -13, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
- -3, /* (148) tagNamelist ::= tagNamelist COMMA ids */
- -1, /* (149) tagNamelist ::= ids */
- -5, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */
- -3, /* (151) columnlist ::= columnlist COMMA column */
- -1, /* (152) columnlist ::= column */
- -2, /* (153) column ::= ids typename */
- -3, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */
- -1, /* (155) tagitemlist ::= tagitem */
- -1, /* (156) tagitem ::= INTEGER */
- -1, /* (157) tagitem ::= FLOAT */
- -1, /* (158) tagitem ::= STRING */
- -1, /* (159) tagitem ::= BOOL */
- -1, /* (160) tagitem ::= NULL */
- -1, /* (161) tagitem ::= NOW */
- -2, /* (162) tagitem ::= MINUS INTEGER */
- -2, /* (163) tagitem ::= MINUS FLOAT */
- -2, /* (164) tagitem ::= PLUS INTEGER */
- -2, /* (165) tagitem ::= PLUS FLOAT */
- -14, /* (166) select ::= SELECT selcollist from where_opt interval_opt sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
- -3, /* (167) select ::= LP select RP */
- -1, /* (168) union ::= select */
- -4, /* (169) union ::= union UNION ALL select */
- -1, /* (170) cmd ::= union */
- -2, /* (171) select ::= SELECT selcollist */
- -2, /* (172) sclp ::= selcollist COMMA */
- 0, /* (173) sclp ::= */
- -4, /* (174) selcollist ::= sclp distinct expr as */
- -2, /* (175) selcollist ::= sclp STAR */
- -2, /* (176) as ::= AS ids */
- -1, /* (177) as ::= ids */
- 0, /* (178) as ::= */
- -1, /* (179) distinct ::= DISTINCT */
- 0, /* (180) distinct ::= */
- -2, /* (181) from ::= FROM tablelist */
- -2, /* (182) from ::= FROM sub */
- -3, /* (183) sub ::= LP union RP */
- -4, /* (184) sub ::= LP union RP ids */
- -6, /* (185) sub ::= sub COMMA LP union RP ids */
- -2, /* (186) tablelist ::= ids cpxName */
- -3, /* (187) tablelist ::= ids cpxName ids */
- -4, /* (188) tablelist ::= tablelist COMMA ids cpxName */
- -5, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */
- -1, /* (190) tmvar ::= VARIABLE */
- -4, /* (191) interval_opt ::= INTERVAL LP tmvar RP */
- -6, /* (192) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
- 0, /* (193) interval_opt ::= */
- 0, /* (194) session_option ::= */
- -7, /* (195) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
- 0, /* (196) windowstate_option ::= */
- -4, /* (197) windowstate_option ::= STATE_WINDOW LP ids RP */
- 0, /* (198) fill_opt ::= */
- -6, /* (199) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- -4, /* (200) fill_opt ::= FILL LP ID RP */
- -4, /* (201) sliding_opt ::= SLIDING LP tmvar RP */
- 0, /* (202) sliding_opt ::= */
- 0, /* (203) orderby_opt ::= */
- -3, /* (204) orderby_opt ::= ORDER BY sortlist */
- -4, /* (205) sortlist ::= sortlist COMMA item sortorder */
- -2, /* (206) sortlist ::= item sortorder */
- -2, /* (207) item ::= ids cpxName */
- -1, /* (208) sortorder ::= ASC */
- -1, /* (209) sortorder ::= DESC */
- 0, /* (210) sortorder ::= */
- 0, /* (211) groupby_opt ::= */
- -3, /* (212) groupby_opt ::= GROUP BY grouplist */
- -3, /* (213) grouplist ::= grouplist COMMA item */
- -1, /* (214) grouplist ::= item */
- 0, /* (215) having_opt ::= */
- -2, /* (216) having_opt ::= HAVING expr */
- 0, /* (217) limit_opt ::= */
- -2, /* (218) limit_opt ::= LIMIT signed */
- -4, /* (219) limit_opt ::= LIMIT signed OFFSET signed */
- -4, /* (220) limit_opt ::= LIMIT signed COMMA signed */
- 0, /* (221) slimit_opt ::= */
- -2, /* (222) slimit_opt ::= SLIMIT signed */
- -4, /* (223) slimit_opt ::= SLIMIT signed SOFFSET signed */
- -4, /* (224) slimit_opt ::= SLIMIT signed COMMA signed */
- 0, /* (225) where_opt ::= */
- -2, /* (226) where_opt ::= WHERE expr */
- -3, /* (227) expr ::= LP expr RP */
- -1, /* (228) expr ::= ID */
- -3, /* (229) expr ::= ID DOT ID */
- -3, /* (230) expr ::= ID DOT STAR */
- -1, /* (231) expr ::= INTEGER */
- -2, /* (232) expr ::= MINUS INTEGER */
- -2, /* (233) expr ::= PLUS INTEGER */
- -1, /* (234) expr ::= FLOAT */
- -2, /* (235) expr ::= MINUS FLOAT */
- -2, /* (236) expr ::= PLUS FLOAT */
- -1, /* (237) expr ::= STRING */
- -1, /* (238) expr ::= NOW */
- -1, /* (239) expr ::= VARIABLE */
- -2, /* (240) expr ::= PLUS VARIABLE */
- -2, /* (241) expr ::= MINUS VARIABLE */
- -1, /* (242) expr ::= BOOL */
- -1, /* (243) expr ::= NULL */
- -4, /* (244) expr ::= ID LP exprlist RP */
- -4, /* (245) expr ::= ID LP STAR RP */
- -3, /* (246) expr ::= expr IS NULL */
- -4, /* (247) expr ::= expr IS NOT NULL */
- -3, /* (248) expr ::= expr LT expr */
- -3, /* (249) expr ::= expr GT expr */
- -3, /* (250) expr ::= expr LE expr */
- -3, /* (251) expr ::= expr GE expr */
- -3, /* (252) expr ::= expr NE expr */
- -3, /* (253) expr ::= expr EQ expr */
- -5, /* (254) expr ::= expr BETWEEN expr AND expr */
- -3, /* (255) expr ::= expr AND expr */
- -3, /* (256) expr ::= expr OR expr */
- -3, /* (257) expr ::= expr PLUS expr */
- -3, /* (258) expr ::= expr MINUS expr */
- -3, /* (259) expr ::= expr STAR expr */
- -3, /* (260) expr ::= expr SLASH expr */
- -3, /* (261) expr ::= expr REM expr */
- -3, /* (262) expr ::= expr LIKE expr */
- -5, /* (263) expr ::= expr IN LP exprlist RP */
- -3, /* (264) exprlist ::= exprlist COMMA expritem */
- -1, /* (265) exprlist ::= expritem */
- -1, /* (266) expritem ::= expr */
- 0, /* (267) expritem ::= */
- -3, /* (268) cmd ::= RESET QUERY CACHE */
- -3, /* (269) cmd ::= SYNCDB ids REPLICA */
- -7, /* (270) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (271) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- -7, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
- -7, /* (273) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- -7, /* (274) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- -8, /* (275) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- -9, /* (276) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- -7, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
- -7, /* (278) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (279) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- -7, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
- -7, /* (281) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- -7, /* (282) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- -8, /* (283) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- -9, /* (284) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
- -7, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
- -3, /* (286) cmd ::= KILL CONNECTION INTEGER */
- -5, /* (287) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- -5, /* (288) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+ -3, /* (40) cmd ::= DESC ids cpxName */
+ -5, /* (41) cmd ::= ALTER USER ids PASS ids */
+ -5, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */
+ -4, /* (43) cmd ::= ALTER DNODE ids ids */
+ -5, /* (44) cmd ::= ALTER DNODE ids ids ids */
+ -3, /* (45) cmd ::= ALTER LOCAL ids */
+ -4, /* (46) cmd ::= ALTER LOCAL ids ids */
+ -4, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */
+ -4, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */
+ -4, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */
+ -6, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+ -6, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */
+ -1, /* (52) ids ::= ID */
+ -1, /* (53) ids ::= STRING */
+ -2, /* (54) ifexists ::= IF EXISTS */
+ 0, /* (55) ifexists ::= */
+ -3, /* (56) ifnotexists ::= IF NOT EXISTS */
+ 0, /* (57) ifnotexists ::= */
+ -3, /* (58) cmd ::= CREATE DNODE ids */
+ -6, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
+ -5, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ -5, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
+ -8, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ -9, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ -5, /* (64) cmd ::= CREATE USER ids PASS ids */
+ 0, /* (65) bufsize ::= */
+ -2, /* (66) bufsize ::= BUFSIZE INTEGER */
+ 0, /* (67) pps ::= */
+ -2, /* (68) pps ::= PPS INTEGER */
+ 0, /* (69) tseries ::= */
+ -2, /* (70) tseries ::= TSERIES INTEGER */
+ 0, /* (71) dbs ::= */
+ -2, /* (72) dbs ::= DBS INTEGER */
+ 0, /* (73) streams ::= */
+ -2, /* (74) streams ::= STREAMS INTEGER */
+ 0, /* (75) storage ::= */
+ -2, /* (76) storage ::= STORAGE INTEGER */
+ 0, /* (77) qtime ::= */
+ -2, /* (78) qtime ::= QTIME INTEGER */
+ 0, /* (79) users ::= */
+ -2, /* (80) users ::= USERS INTEGER */
+ 0, /* (81) conns ::= */
+ -2, /* (82) conns ::= CONNS INTEGER */
+ 0, /* (83) state ::= */
+ -2, /* (84) state ::= STATE ids */
+ -9, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+ -3, /* (86) intitemlist ::= intitemlist COMMA intitem */
+ -1, /* (87) intitemlist ::= intitem */
+ -1, /* (88) intitem ::= INTEGER */
+ -2, /* (89) keep ::= KEEP intitemlist */
+ -2, /* (90) cache ::= CACHE INTEGER */
+ -2, /* (91) replica ::= REPLICA INTEGER */
+ -2, /* (92) quorum ::= QUORUM INTEGER */
+ -2, /* (93) days ::= DAYS INTEGER */
+ -2, /* (94) minrows ::= MINROWS INTEGER */
+ -2, /* (95) maxrows ::= MAXROWS INTEGER */
+ -2, /* (96) blocks ::= BLOCKS INTEGER */
+ -2, /* (97) ctime ::= CTIME INTEGER */
+ -2, /* (98) wal ::= WAL INTEGER */
+ -2, /* (99) fsync ::= FSYNC INTEGER */
+ -2, /* (100) comp ::= COMP INTEGER */
+ -2, /* (101) prec ::= PRECISION STRING */
+ -2, /* (102) update ::= UPDATE INTEGER */
+ -2, /* (103) cachelast ::= CACHELAST INTEGER */
+ -2, /* (104) partitions ::= PARTITIONS INTEGER */
+ 0, /* (105) db_optr ::= */
+ -2, /* (106) db_optr ::= db_optr cache */
+ -2, /* (107) db_optr ::= db_optr replica */
+ -2, /* (108) db_optr ::= db_optr quorum */
+ -2, /* (109) db_optr ::= db_optr days */
+ -2, /* (110) db_optr ::= db_optr minrows */
+ -2, /* (111) db_optr ::= db_optr maxrows */
+ -2, /* (112) db_optr ::= db_optr blocks */
+ -2, /* (113) db_optr ::= db_optr ctime */
+ -2, /* (114) db_optr ::= db_optr wal */
+ -2, /* (115) db_optr ::= db_optr fsync */
+ -2, /* (116) db_optr ::= db_optr comp */
+ -2, /* (117) db_optr ::= db_optr prec */
+ -2, /* (118) db_optr ::= db_optr keep */
+ -2, /* (119) db_optr ::= db_optr update */
+ -2, /* (120) db_optr ::= db_optr cachelast */
+ -1, /* (121) topic_optr ::= db_optr */
+ -2, /* (122) topic_optr ::= topic_optr partitions */
+ 0, /* (123) alter_db_optr ::= */
+ -2, /* (124) alter_db_optr ::= alter_db_optr replica */
+ -2, /* (125) alter_db_optr ::= alter_db_optr quorum */
+ -2, /* (126) alter_db_optr ::= alter_db_optr keep */
+ -2, /* (127) alter_db_optr ::= alter_db_optr blocks */
+ -2, /* (128) alter_db_optr ::= alter_db_optr comp */
+ -2, /* (129) alter_db_optr ::= alter_db_optr update */
+ -2, /* (130) alter_db_optr ::= alter_db_optr cachelast */
+ -1, /* (131) alter_topic_optr ::= alter_db_optr */
+ -2, /* (132) alter_topic_optr ::= alter_topic_optr partitions */
+ -1, /* (133) typename ::= ids */
+ -4, /* (134) typename ::= ids LP signed RP */
+ -2, /* (135) typename ::= ids UNSIGNED */
+ -1, /* (136) signed ::= INTEGER */
+ -2, /* (137) signed ::= PLUS INTEGER */
+ -2, /* (138) signed ::= MINUS INTEGER */
+ -3, /* (139) cmd ::= CREATE TABLE create_table_args */
+ -3, /* (140) cmd ::= CREATE TABLE create_stable_args */
+ -3, /* (141) cmd ::= CREATE STABLE create_stable_args */
+ -3, /* (142) cmd ::= CREATE TABLE create_table_list */
+ -1, /* (143) create_table_list ::= create_from_stable */
+ -2, /* (144) create_table_list ::= create_table_list create_from_stable */
+ -6, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ -10, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ -10, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ -13, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+ -3, /* (149) tagNamelist ::= tagNamelist COMMA ids */
+ -1, /* (150) tagNamelist ::= ids */
+ -5, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */
+ -3, /* (152) columnlist ::= columnlist COMMA column */
+ -1, /* (153) columnlist ::= column */
+ -2, /* (154) column ::= ids typename */
+ -3, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */
+ -1, /* (156) tagitemlist ::= tagitem */
+ -1, /* (157) tagitem ::= INTEGER */
+ -1, /* (158) tagitem ::= FLOAT */
+ -1, /* (159) tagitem ::= STRING */
+ -1, /* (160) tagitem ::= BOOL */
+ -1, /* (161) tagitem ::= NULL */
+ -1, /* (162) tagitem ::= NOW */
+ -2, /* (163) tagitem ::= MINUS INTEGER */
+ -2, /* (164) tagitem ::= MINUS FLOAT */
+ -2, /* (165) tagitem ::= PLUS INTEGER */
+ -2, /* (166) tagitem ::= PLUS FLOAT */
+ -14, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
+ -3, /* (168) select ::= LP select RP */
+ -1, /* (169) union ::= select */
+ -4, /* (170) union ::= union UNION ALL select */
+ -1, /* (171) cmd ::= union */
+ -2, /* (172) select ::= SELECT selcollist */
+ -2, /* (173) sclp ::= selcollist COMMA */
+ 0, /* (174) sclp ::= */
+ -4, /* (175) selcollist ::= sclp distinct expr as */
+ -2, /* (176) selcollist ::= sclp STAR */
+ -2, /* (177) as ::= AS ids */
+ -1, /* (178) as ::= ids */
+ 0, /* (179) as ::= */
+ -1, /* (180) distinct ::= DISTINCT */
+ 0, /* (181) distinct ::= */
+ -2, /* (182) from ::= FROM tablelist */
+ -2, /* (183) from ::= FROM sub */
+ -3, /* (184) sub ::= LP union RP */
+ -4, /* (185) sub ::= LP union RP ids */
+ -6, /* (186) sub ::= sub COMMA LP union RP ids */
+ -2, /* (187) tablelist ::= ids cpxName */
+ -3, /* (188) tablelist ::= ids cpxName ids */
+ -4, /* (189) tablelist ::= tablelist COMMA ids cpxName */
+ -5, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */
+ -1, /* (191) tmvar ::= VARIABLE */
+ -4, /* (192) interval_option ::= intervalKey LP tmvar RP */
+ -6, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
+ 0, /* (194) interval_option ::= */
+ -1, /* (195) intervalKey ::= INTERVAL */
+ -1, /* (196) intervalKey ::= EVERY */
+ 0, /* (197) session_option ::= */
+ -7, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+ 0, /* (199) windowstate_option ::= */
+ -4, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */
+ 0, /* (201) fill_opt ::= */
+ -6, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ -4, /* (203) fill_opt ::= FILL LP ID RP */
+ -4, /* (204) sliding_opt ::= SLIDING LP tmvar RP */
+ 0, /* (205) sliding_opt ::= */
+ 0, /* (206) orderby_opt ::= */
+ -3, /* (207) orderby_opt ::= ORDER BY sortlist */
+ -4, /* (208) sortlist ::= sortlist COMMA item sortorder */
+ -2, /* (209) sortlist ::= item sortorder */
+ -2, /* (210) item ::= ids cpxName */
+ -1, /* (211) sortorder ::= ASC */
+ -1, /* (212) sortorder ::= DESC */
+ 0, /* (213) sortorder ::= */
+ 0, /* (214) groupby_opt ::= */
+ -3, /* (215) groupby_opt ::= GROUP BY grouplist */
+ -3, /* (216) grouplist ::= grouplist COMMA item */
+ -1, /* (217) grouplist ::= item */
+ 0, /* (218) having_opt ::= */
+ -2, /* (219) having_opt ::= HAVING expr */
+ 0, /* (220) limit_opt ::= */
+ -2, /* (221) limit_opt ::= LIMIT signed */
+ -4, /* (222) limit_opt ::= LIMIT signed OFFSET signed */
+ -4, /* (223) limit_opt ::= LIMIT signed COMMA signed */
+ 0, /* (224) slimit_opt ::= */
+ -2, /* (225) slimit_opt ::= SLIMIT signed */
+ -4, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ -4, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */
+ 0, /* (228) where_opt ::= */
+ -2, /* (229) where_opt ::= WHERE expr */
+ -3, /* (230) expr ::= LP expr RP */
+ -1, /* (231) expr ::= ID */
+ -3, /* (232) expr ::= ID DOT ID */
+ -3, /* (233) expr ::= ID DOT STAR */
+ -1, /* (234) expr ::= INTEGER */
+ -2, /* (235) expr ::= MINUS INTEGER */
+ -2, /* (236) expr ::= PLUS INTEGER */
+ -1, /* (237) expr ::= FLOAT */
+ -2, /* (238) expr ::= MINUS FLOAT */
+ -2, /* (239) expr ::= PLUS FLOAT */
+ -1, /* (240) expr ::= STRING */
+ -1, /* (241) expr ::= NOW */
+ -1, /* (242) expr ::= VARIABLE */
+ -2, /* (243) expr ::= PLUS VARIABLE */
+ -2, /* (244) expr ::= MINUS VARIABLE */
+ -1, /* (245) expr ::= BOOL */
+ -1, /* (246) expr ::= NULL */
+ -4, /* (247) expr ::= ID LP exprlist RP */
+ -4, /* (248) expr ::= ID LP STAR RP */
+ -3, /* (249) expr ::= expr IS NULL */
+ -4, /* (250) expr ::= expr IS NOT NULL */
+ -3, /* (251) expr ::= expr LT expr */
+ -3, /* (252) expr ::= expr GT expr */
+ -3, /* (253) expr ::= expr LE expr */
+ -3, /* (254) expr ::= expr GE expr */
+ -3, /* (255) expr ::= expr NE expr */
+ -3, /* (256) expr ::= expr EQ expr */
+ -5, /* (257) expr ::= expr BETWEEN expr AND expr */
+ -3, /* (258) expr ::= expr AND expr */
+ -3, /* (259) expr ::= expr OR expr */
+ -3, /* (260) expr ::= expr PLUS expr */
+ -3, /* (261) expr ::= expr MINUS expr */
+ -3, /* (262) expr ::= expr STAR expr */
+ -3, /* (263) expr ::= expr SLASH expr */
+ -3, /* (264) expr ::= expr REM expr */
+ -3, /* (265) expr ::= expr LIKE expr */
+ -3, /* (266) expr ::= expr MATCH expr */
+ -3, /* (267) expr ::= expr NMATCH expr */
+ -5, /* (268) expr ::= expr IN LP exprlist RP */
+ -3, /* (269) exprlist ::= exprlist COMMA expritem */
+ -1, /* (270) exprlist ::= expritem */
+ -1, /* (271) expritem ::= expr */
+ 0, /* (272) expritem ::= */
+ -3, /* (273) cmd ::= RESET QUERY CACHE */
+ -3, /* (274) cmd ::= SYNCDB ids REPLICA */
+ -7, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ -7, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ -7, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+ -7, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ -7, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ -8, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ -9, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ -7, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+ -7, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ -7, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ -7, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+ -7, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ -7, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ -8, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ -9, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+ -7, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+ -3, /* (291) cmd ::= KILL CONNECTION INTEGER */
+ -5, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ -5, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -2527,113 +2552,113 @@ static YYACTIONTYPE yy_reduce(
/********** Begin reduce actions **********************************************/
YYMINORTYPE yylhsminor;
case 0: /* program ::= cmd */
- case 138: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==138);
- case 139: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==139);
- case 140: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==140);
+ case 139: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==139);
+ case 140: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==140);
+ case 141: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==141);
#line 63 "sql.y"
{}
-#line 2536 "sql.c"
+#line 2561 "sql.c"
break;
case 1: /* cmd ::= SHOW DATABASES */
#line 66 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);}
-#line 2541 "sql.c"
+#line 2566 "sql.c"
break;
case 2: /* cmd ::= SHOW TOPICS */
#line 67 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);}
-#line 2546 "sql.c"
+#line 2571 "sql.c"
break;
case 3: /* cmd ::= SHOW FUNCTIONS */
#line 68 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 0, 0);}
-#line 2551 "sql.c"
+#line 2576 "sql.c"
break;
case 4: /* cmd ::= SHOW MNODES */
#line 69 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);}
-#line 2556 "sql.c"
+#line 2581 "sql.c"
break;
case 5: /* cmd ::= SHOW DNODES */
#line 70 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);}
-#line 2561 "sql.c"
+#line 2586 "sql.c"
break;
case 6: /* cmd ::= SHOW ACCOUNTS */
#line 71 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);}
-#line 2566 "sql.c"
+#line 2591 "sql.c"
break;
case 7: /* cmd ::= SHOW USERS */
#line 72 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);}
-#line 2571 "sql.c"
+#line 2596 "sql.c"
break;
case 8: /* cmd ::= SHOW MODULES */
#line 74 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); }
-#line 2576 "sql.c"
+#line 2601 "sql.c"
break;
case 9: /* cmd ::= SHOW QUERIES */
#line 75 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); }
-#line 2581 "sql.c"
+#line 2606 "sql.c"
break;
case 10: /* cmd ::= SHOW CONNECTIONS */
#line 76 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);}
-#line 2586 "sql.c"
+#line 2611 "sql.c"
break;
case 11: /* cmd ::= SHOW STREAMS */
#line 77 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); }
-#line 2591 "sql.c"
+#line 2616 "sql.c"
break;
case 12: /* cmd ::= SHOW VARIABLES */
#line 78 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VARIABLES, 0, 0); }
-#line 2596 "sql.c"
+#line 2621 "sql.c"
break;
case 13: /* cmd ::= SHOW SCORES */
#line 79 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); }
-#line 2601 "sql.c"
+#line 2626 "sql.c"
break;
case 14: /* cmd ::= SHOW GRANTS */
#line 80 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); }
-#line 2606 "sql.c"
+#line 2631 "sql.c"
break;
case 15: /* cmd ::= SHOW VNODES */
#line 82 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); }
-#line 2611 "sql.c"
+#line 2636 "sql.c"
break;
case 16: /* cmd ::= SHOW VNODES ids */
#line 83 "sql.y"
{ setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); }
-#line 2616 "sql.c"
+#line 2641 "sql.c"
break;
case 17: /* dbPrefix ::= */
#line 87 "sql.y"
{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;}
-#line 2621 "sql.c"
+#line 2646 "sql.c"
break;
case 18: /* dbPrefix ::= ids DOT */
#line 88 "sql.y"
{yylhsminor.yy0 = yymsp[-1].minor.yy0; }
-#line 2626 "sql.c"
+#line 2651 "sql.c"
yymsp[-1].minor.yy0 = yylhsminor.yy0;
break;
case 19: /* cpxName ::= */
#line 91 "sql.y"
{yymsp[1].minor.yy0.n = 0; }
-#line 2632 "sql.c"
+#line 2657 "sql.c"
break;
case 20: /* cpxName ::= DOT ids */
#line 92 "sql.y"
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; }
-#line 2637 "sql.c"
+#line 2662 "sql.c"
break;
case 21: /* cmd ::= SHOW CREATE TABLE ids cpxName */
#line 94 "sql.y"
@@ -2641,7 +2666,7 @@ static YYACTIONTYPE yy_reduce(
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0);
}
-#line 2645 "sql.c"
+#line 2670 "sql.c"
break;
case 22: /* cmd ::= SHOW CREATE STABLE ids cpxName */
#line 98 "sql.y"
@@ -2649,35 +2674,35 @@ static YYACTIONTYPE yy_reduce(
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &yymsp[-1].minor.yy0);
}
-#line 2653 "sql.c"
+#line 2678 "sql.c"
break;
case 23: /* cmd ::= SHOW CREATE DATABASE ids */
#line 103 "sql.y"
{
setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0);
}
-#line 2660 "sql.c"
+#line 2685 "sql.c"
break;
case 24: /* cmd ::= SHOW dbPrefix TABLES */
#line 107 "sql.y"
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0);
}
-#line 2667 "sql.c"
+#line 2692 "sql.c"
break;
case 25: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */
#line 111 "sql.y"
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0);
}
-#line 2674 "sql.c"
+#line 2699 "sql.c"
break;
case 26: /* cmd ::= SHOW dbPrefix STABLES */
#line 115 "sql.y"
{
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0);
}
-#line 2681 "sql.c"
+#line 2706 "sql.c"
break;
case 27: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */
#line 119 "sql.y"
@@ -2686,7 +2711,7 @@ static YYACTIONTYPE yy_reduce(
tSetDbName(&token, &yymsp[-3].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0);
}
-#line 2690 "sql.c"
+#line 2715 "sql.c"
break;
case 28: /* cmd ::= SHOW dbPrefix VGROUPS */
#line 125 "sql.y"
@@ -2695,7 +2720,7 @@ static YYACTIONTYPE yy_reduce(
tSetDbName(&token, &yymsp[-1].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
}
-#line 2699 "sql.c"
+#line 2724 "sql.c"
break;
case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */
#line 131 "sql.y"
@@ -2704,7 +2729,7 @@ static YYACTIONTYPE yy_reduce(
tSetDbName(&token, &yymsp[-2].minor.yy0);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0);
}
-#line 2708 "sql.c"
+#line 2733 "sql.c"
break;
case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */
#line 138 "sql.y"
@@ -2712,7 +2737,7 @@ static YYACTIONTYPE yy_reduce(
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1);
}
-#line 2716 "sql.c"
+#line 2741 "sql.c"
break;
case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */
#line 144 "sql.y"
@@ -2720,1151 +2745,1176 @@ static YYACTIONTYPE yy_reduce(
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE);
}
-#line 2724 "sql.c"
+#line 2749 "sql.c"
break;
case 32: /* cmd ::= DROP DATABASE ifexists ids */
#line 149 "sql.y"
{ setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); }
-#line 2729 "sql.c"
+#line 2754 "sql.c"
break;
case 33: /* cmd ::= DROP TOPIC ifexists ids */
#line 150 "sql.y"
{ setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); }
-#line 2734 "sql.c"
+#line 2759 "sql.c"
break;
case 34: /* cmd ::= DROP FUNCTION ids */
#line 151 "sql.y"
{ setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); }
-#line 2739 "sql.c"
+#line 2764 "sql.c"
break;
case 35: /* cmd ::= DROP DNODE ids */
#line 153 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); }
-#line 2744 "sql.c"
+#line 2769 "sql.c"
break;
case 36: /* cmd ::= DROP USER ids */
#line 154 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); }
-#line 2749 "sql.c"
+#line 2774 "sql.c"
break;
case 37: /* cmd ::= DROP ACCOUNT ids */
#line 155 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); }
-#line 2754 "sql.c"
+#line 2779 "sql.c"
break;
case 38: /* cmd ::= USE ids */
#line 158 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);}
-#line 2759 "sql.c"
+#line 2784 "sql.c"
break;
case 39: /* cmd ::= DESCRIBE ids cpxName */
+ case 40: /* cmd ::= DESC ids cpxName */ yytestcase(yyruleno==40);
#line 161 "sql.y"
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0);
}
-#line 2767 "sql.c"
+#line 2793 "sql.c"
break;
- case 40: /* cmd ::= ALTER USER ids PASS ids */
-#line 167 "sql.y"
+ case 41: /* cmd ::= ALTER USER ids PASS ids */
+#line 170 "sql.y"
{ setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); }
-#line 2772 "sql.c"
+#line 2798 "sql.c"
break;
- case 41: /* cmd ::= ALTER USER ids PRIVILEGE ids */
-#line 168 "sql.y"
+ case 42: /* cmd ::= ALTER USER ids PRIVILEGE ids */
+#line 171 "sql.y"
{ setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);}
-#line 2777 "sql.c"
+#line 2803 "sql.c"
break;
- case 42: /* cmd ::= ALTER DNODE ids ids */
-#line 169 "sql.y"
+ case 43: /* cmd ::= ALTER DNODE ids ids */
+#line 172 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
-#line 2782 "sql.c"
+#line 2808 "sql.c"
break;
- case 43: /* cmd ::= ALTER DNODE ids ids ids */
-#line 170 "sql.y"
+ case 44: /* cmd ::= ALTER DNODE ids ids ids */
+#line 173 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
-#line 2787 "sql.c"
+#line 2813 "sql.c"
break;
- case 44: /* cmd ::= ALTER LOCAL ids */
-#line 171 "sql.y"
+ case 45: /* cmd ::= ALTER LOCAL ids */
+#line 174 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); }
-#line 2792 "sql.c"
+#line 2818 "sql.c"
break;
- case 45: /* cmd ::= ALTER LOCAL ids ids */
-#line 172 "sql.y"
+ case 46: /* cmd ::= ALTER LOCAL ids ids */
+#line 175 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
-#line 2797 "sql.c"
- break;
- case 46: /* cmd ::= ALTER DATABASE ids alter_db_optr */
- case 47: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==47);
-#line 173 "sql.y"
-{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy42, &t);}
-#line 2803 "sql.c"
+#line 2823 "sql.c"
break;
- case 48: /* cmd ::= ALTER ACCOUNT ids acct_optr */
+ case 47: /* cmd ::= ALTER DATABASE ids alter_db_optr */
+ case 48: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==48);
#line 176 "sql.y"
-{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy341);}
-#line 2808 "sql.c"
- break;
- case 49: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
-#line 177 "sql.y"
-{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy341);}
-#line 2813 "sql.c"
- break;
- case 50: /* cmd ::= COMPACT VNODES IN LP exprlist RP */
-#line 181 "sql.y"
-{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy131);}
-#line 2818 "sql.c"
- break;
- case 51: /* ids ::= ID */
- case 52: /* ids ::= STRING */ yytestcase(yyruleno==52);
-#line 187 "sql.y"
+{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &t);}
+#line 2829 "sql.c"
+ break;
+ case 49: /* cmd ::= ALTER ACCOUNT ids acct_optr */
+#line 179 "sql.y"
+{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy171);}
+#line 2834 "sql.c"
+ break;
+ case 50: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+#line 180 "sql.y"
+{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);}
+#line 2839 "sql.c"
+ break;
+ case 51: /* cmd ::= COMPACT VNODES IN LP exprlist RP */
+#line 184 "sql.y"
+{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy421);}
+#line 2844 "sql.c"
+ break;
+ case 52: /* ids ::= ID */
+ case 53: /* ids ::= STRING */ yytestcase(yyruleno==53);
+#line 190 "sql.y"
{yylhsminor.yy0 = yymsp[0].minor.yy0; }
-#line 2824 "sql.c"
+#line 2850 "sql.c"
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 53: /* ifexists ::= IF EXISTS */
-#line 191 "sql.y"
+ case 54: /* ifexists ::= IF EXISTS */
+#line 194 "sql.y"
{ yymsp[-1].minor.yy0.n = 1;}
-#line 2830 "sql.c"
+#line 2856 "sql.c"
break;
- case 54: /* ifexists ::= */
- case 56: /* ifnotexists ::= */ yytestcase(yyruleno==56);
- case 180: /* distinct ::= */ yytestcase(yyruleno==180);
-#line 192 "sql.y"
+ case 55: /* ifexists ::= */
+ case 57: /* ifnotexists ::= */ yytestcase(yyruleno==57);
+ case 181: /* distinct ::= */ yytestcase(yyruleno==181);
+#line 195 "sql.y"
{ yymsp[1].minor.yy0.n = 0;}
-#line 2837 "sql.c"
+#line 2863 "sql.c"
break;
- case 55: /* ifnotexists ::= IF NOT EXISTS */
-#line 195 "sql.y"
+ case 56: /* ifnotexists ::= IF NOT EXISTS */
+#line 198 "sql.y"
{ yymsp[-2].minor.yy0.n = 1;}
-#line 2842 "sql.c"
- break;
- case 57: /* cmd ::= CREATE DNODE ids */
-#line 200 "sql.y"
-{ setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);}
-#line 2847 "sql.c"
- break;
- case 58: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
-#line 202 "sql.y"
-{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy341);}
-#line 2852 "sql.c"
+#line 2868 "sql.c"
break;
- case 59: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- case 60: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==60);
+ case 58: /* cmd ::= CREATE DNODE ids */
#line 203 "sql.y"
-{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy42, &yymsp[-2].minor.yy0);}
-#line 2858 "sql.c"
+{ setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);}
+#line 2873 "sql.c"
break;
- case 61: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ case 59: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
#line 205 "sql.y"
-{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy163, &yymsp[0].minor.yy0, 1);}
-#line 2863 "sql.c"
+{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);}
+#line 2878 "sql.c"
break;
- case 62: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ case 60: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ case 61: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==61);
#line 206 "sql.y"
-{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy163, &yymsp[0].minor.yy0, 2);}
-#line 2868 "sql.c"
+{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &yymsp[-2].minor.yy0);}
+#line 2884 "sql.c"
break;
- case 63: /* cmd ::= CREATE USER ids PASS ids */
-#line 207 "sql.y"
-{ setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);}
-#line 2873 "sql.c"
+ case 62: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+#line 208 "sql.y"
+{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 1);}
+#line 2889 "sql.c"
break;
- case 64: /* bufsize ::= */
- case 66: /* pps ::= */ yytestcase(yyruleno==66);
- case 68: /* tseries ::= */ yytestcase(yyruleno==68);
- case 70: /* dbs ::= */ yytestcase(yyruleno==70);
- case 72: /* streams ::= */ yytestcase(yyruleno==72);
- case 74: /* storage ::= */ yytestcase(yyruleno==74);
- case 76: /* qtime ::= */ yytestcase(yyruleno==76);
- case 78: /* users ::= */ yytestcase(yyruleno==78);
- case 80: /* conns ::= */ yytestcase(yyruleno==80);
- case 82: /* state ::= */ yytestcase(yyruleno==82);
+ case 63: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
#line 209 "sql.y"
-{ yymsp[1].minor.yy0.n = 0; }
-#line 2887 "sql.c"
- break;
- case 65: /* bufsize ::= BUFSIZE INTEGER */
- case 67: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==67);
- case 69: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==69);
- case 71: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==71);
- case 73: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==73);
- case 75: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==75);
- case 77: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==77);
- case 79: /* users ::= USERS INTEGER */ yytestcase(yyruleno==79);
- case 81: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==81);
- case 83: /* state ::= STATE ids */ yytestcase(yyruleno==83);
+{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 2);}
+#line 2894 "sql.c"
+ break;
+ case 64: /* cmd ::= CREATE USER ids PASS ids */
#line 210 "sql.y"
+{ setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);}
+#line 2899 "sql.c"
+ break;
+ case 65: /* bufsize ::= */
+ case 67: /* pps ::= */ yytestcase(yyruleno==67);
+ case 69: /* tseries ::= */ yytestcase(yyruleno==69);
+ case 71: /* dbs ::= */ yytestcase(yyruleno==71);
+ case 73: /* streams ::= */ yytestcase(yyruleno==73);
+ case 75: /* storage ::= */ yytestcase(yyruleno==75);
+ case 77: /* qtime ::= */ yytestcase(yyruleno==77);
+ case 79: /* users ::= */ yytestcase(yyruleno==79);
+ case 81: /* conns ::= */ yytestcase(yyruleno==81);
+ case 83: /* state ::= */ yytestcase(yyruleno==83);
+#line 212 "sql.y"
+{ yymsp[1].minor.yy0.n = 0; }
+#line 2913 "sql.c"
+ break;
+ case 66: /* bufsize ::= BUFSIZE INTEGER */
+ case 68: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==68);
+ case 70: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==70);
+ case 72: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==72);
+ case 74: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==74);
+ case 76: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==76);
+ case 78: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==78);
+ case 80: /* users ::= USERS INTEGER */ yytestcase(yyruleno==80);
+ case 82: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==82);
+ case 84: /* state ::= STATE ids */ yytestcase(yyruleno==84);
+#line 213 "sql.y"
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
-#line 2901 "sql.c"
+#line 2927 "sql.c"
break;
- case 84: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */
-#line 240 "sql.y"
+ case 85: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+#line 243 "sql.y"
{
- yylhsminor.yy341.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1;
- yylhsminor.yy341.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1;
- yylhsminor.yy341.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1;
- yylhsminor.yy341.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1;
- yylhsminor.yy341.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1;
- yylhsminor.yy341.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1;
- yylhsminor.yy341.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1;
- yylhsminor.yy341.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1;
- yylhsminor.yy341.stat = yymsp[0].minor.yy0;
+ yylhsminor.yy171.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1;
+ yylhsminor.yy171.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1;
+ yylhsminor.yy171.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1;
+ yylhsminor.yy171.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1;
+ yylhsminor.yy171.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1;
+ yylhsminor.yy171.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1;
+ yylhsminor.yy171.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1;
+ yylhsminor.yy171.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1;
+ yylhsminor.yy171.stat = yymsp[0].minor.yy0;
}
-#line 2916 "sql.c"
- yymsp[-8].minor.yy341 = yylhsminor.yy341;
+#line 2942 "sql.c"
+ yymsp[-8].minor.yy171 = yylhsminor.yy171;
break;
- case 85: /* intitemlist ::= intitemlist COMMA intitem */
- case 154: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==154);
-#line 256 "sql.y"
-{ yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1); }
-#line 2923 "sql.c"
- yymsp[-2].minor.yy131 = yylhsminor.yy131;
- break;
- case 86: /* intitemlist ::= intitem */
- case 155: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==155);
-#line 257 "sql.y"
-{ yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1); }
-#line 2930 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 87: /* intitem ::= INTEGER */
- case 156: /* tagitem ::= INTEGER */ yytestcase(yyruleno==156);
- case 157: /* tagitem ::= FLOAT */ yytestcase(yyruleno==157);
- case 158: /* tagitem ::= STRING */ yytestcase(yyruleno==158);
- case 159: /* tagitem ::= BOOL */ yytestcase(yyruleno==159);
+ case 86: /* intitemlist ::= intitemlist COMMA intitem */
+ case 155: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==155);
#line 259 "sql.y"
-{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); }
-#line 2940 "sql.c"
- yymsp[0].minor.yy516 = yylhsminor.yy516;
- break;
- case 88: /* keep ::= KEEP intitemlist */
-#line 263 "sql.y"
-{ yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; }
-#line 2946 "sql.c"
- break;
- case 89: /* cache ::= CACHE INTEGER */
- case 90: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==90);
- case 91: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==91);
- case 92: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==92);
- case 93: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==93);
- case 94: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==94);
- case 95: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==95);
- case 96: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==96);
- case 97: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==97);
- case 98: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==98);
- case 99: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==99);
- case 100: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==100);
- case 101: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==101);
- case 102: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==102);
- case 103: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==103);
-#line 265 "sql.y"
+{ yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1); }
+#line 2949 "sql.c"
+ yymsp[-2].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 87: /* intitemlist ::= intitem */
+ case 156: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==156);
+#line 260 "sql.y"
+{ yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1); }
+#line 2956 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 88: /* intitem ::= INTEGER */
+ case 157: /* tagitem ::= INTEGER */ yytestcase(yyruleno==157);
+ case 158: /* tagitem ::= FLOAT */ yytestcase(yyruleno==158);
+ case 159: /* tagitem ::= STRING */ yytestcase(yyruleno==159);
+ case 160: /* tagitem ::= BOOL */ yytestcase(yyruleno==160);
+#line 262 "sql.y"
+{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); }
+#line 2966 "sql.c"
+ yymsp[0].minor.yy430 = yylhsminor.yy430;
+ break;
+ case 89: /* keep ::= KEEP intitemlist */
+#line 266 "sql.y"
+{ yymsp[-1].minor.yy421 = yymsp[0].minor.yy421; }
+#line 2972 "sql.c"
+ break;
+ case 90: /* cache ::= CACHE INTEGER */
+ case 91: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==91);
+ case 92: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==92);
+ case 93: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==93);
+ case 94: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==94);
+ case 95: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==95);
+ case 96: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==96);
+ case 97: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==97);
+ case 98: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==98);
+ case 99: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==99);
+ case 100: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==100);
+ case 101: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==101);
+ case 102: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==102);
+ case 103: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==103);
+ case 104: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==104);
+#line 268 "sql.y"
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
-#line 2965 "sql.c"
- break;
- case 104: /* db_optr ::= */
-#line 282 "sql.y"
-{setDefaultCreateDbOption(&yymsp[1].minor.yy42); yymsp[1].minor.yy42.dbType = TSDB_DB_TYPE_DEFAULT;}
-#line 2970 "sql.c"
+#line 2991 "sql.c"
break;
- case 105: /* db_optr ::= db_optr cache */
-#line 284 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 2975 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
- break;
- case 106: /* db_optr ::= db_optr replica */
- case 123: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==123);
+ case 105: /* db_optr ::= */
#line 285 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 2982 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
- break;
- case 107: /* db_optr ::= db_optr quorum */
- case 124: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==124);
-#line 286 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 2989 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
- break;
- case 108: /* db_optr ::= db_optr days */
+{setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;}
+#line 2996 "sql.c"
+ break;
+ case 106: /* db_optr ::= db_optr cache */
#line 287 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 2995 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3001 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 109: /* db_optr ::= db_optr minrows */
+ case 107: /* db_optr ::= db_optr replica */
+ case 124: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==124);
#line 288 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
-#line 3001 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3008 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 110: /* db_optr ::= db_optr maxrows */
+ case 108: /* db_optr ::= db_optr quorum */
+ case 125: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==125);
#line 289 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
-#line 3007 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3015 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 111: /* db_optr ::= db_optr blocks */
- case 126: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==126);
+ case 109: /* db_optr ::= db_optr days */
#line 290 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3014 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3021 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 112: /* db_optr ::= db_optr ctime */
+ case 110: /* db_optr ::= db_optr minrows */
#line 291 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3020 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
+#line 3027 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 113: /* db_optr ::= db_optr wal */
+ case 111: /* db_optr ::= db_optr maxrows */
#line 292 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3026 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
+#line 3033 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 114: /* db_optr ::= db_optr fsync */
+ case 112: /* db_optr ::= db_optr blocks */
+ case 127: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==127);
#line 293 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3032 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3040 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 115: /* db_optr ::= db_optr comp */
- case 127: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==127);
+ case 113: /* db_optr ::= db_optr ctime */
#line 294 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3039 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3046 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 116: /* db_optr ::= db_optr prec */
+ case 114: /* db_optr ::= db_optr wal */
#line 295 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.precision = yymsp[0].minor.yy0; }
-#line 3045 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3052 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 117: /* db_optr ::= db_optr keep */
- case 125: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==125);
+ case 115: /* db_optr ::= db_optr fsync */
#line 296 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.keep = yymsp[0].minor.yy131; }
-#line 3052 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3058 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 118: /* db_optr ::= db_optr update */
- case 128: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==128);
+ case 116: /* db_optr ::= db_optr comp */
+ case 128: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==128);
#line 297 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3059 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3065 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
break;
- case 119: /* db_optr ::= db_optr cachelast */
- case 129: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==129);
+ case 117: /* db_optr ::= db_optr prec */
#line 298 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3066 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
- break;
- case 120: /* topic_optr ::= db_optr */
- case 130: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==130);
-#line 302 "sql.y"
-{ yylhsminor.yy42 = yymsp[0].minor.yy42; yylhsminor.yy42.dbType = TSDB_DB_TYPE_TOPIC; }
-#line 3073 "sql.c"
- yymsp[0].minor.yy42 = yylhsminor.yy42;
- break;
- case 121: /* topic_optr ::= topic_optr partitions */
- case 131: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==131);
-#line 303 "sql.y"
-{ yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3080 "sql.c"
- yymsp[-1].minor.yy42 = yylhsminor.yy42;
- break;
- case 122: /* alter_db_optr ::= */
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.precision = yymsp[0].minor.yy0; }
+#line 3071 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 118: /* db_optr ::= db_optr keep */
+ case 126: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==126);
+#line 299 "sql.y"
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.keep = yymsp[0].minor.yy421; }
+#line 3078 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 119: /* db_optr ::= db_optr update */
+ case 129: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==129);
+#line 300 "sql.y"
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3085 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 120: /* db_optr ::= db_optr cachelast */
+ case 130: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==130);
+#line 301 "sql.y"
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3092 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 121: /* topic_optr ::= db_optr */
+ case 131: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==131);
+#line 305 "sql.y"
+{ yylhsminor.yy90 = yymsp[0].minor.yy90; yylhsminor.yy90.dbType = TSDB_DB_TYPE_TOPIC; }
+#line 3099 "sql.c"
+ yymsp[0].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 122: /* topic_optr ::= topic_optr partitions */
+ case 132: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==132);
#line 306 "sql.y"
-{ setDefaultCreateDbOption(&yymsp[1].minor.yy42); yymsp[1].minor.yy42.dbType = TSDB_DB_TYPE_DEFAULT;}
-#line 3086 "sql.c"
+{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3106 "sql.c"
+ yymsp[-1].minor.yy90 = yylhsminor.yy90;
+ break;
+ case 123: /* alter_db_optr ::= */
+#line 309 "sql.y"
+{ setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;}
+#line 3112 "sql.c"
break;
- case 132: /* typename ::= ids */
-#line 326 "sql.y"
+ case 133: /* typename ::= ids */
+#line 329 "sql.y"
{
yymsp[0].minor.yy0.type = 0;
- tSetColumnType (&yylhsminor.yy163, &yymsp[0].minor.yy0);
+ tSetColumnType (&yylhsminor.yy183, &yymsp[0].minor.yy0);
}
-#line 3094 "sql.c"
- yymsp[0].minor.yy163 = yylhsminor.yy163;
+#line 3120 "sql.c"
+ yymsp[0].minor.yy183 = yylhsminor.yy183;
break;
- case 133: /* typename ::= ids LP signed RP */
-#line 332 "sql.y"
+ case 134: /* typename ::= ids LP signed RP */
+#line 335 "sql.y"
{
- if (yymsp[-1].minor.yy459 <= 0) {
+ if (yymsp[-1].minor.yy325 <= 0) {
yymsp[-3].minor.yy0.type = 0;
- tSetColumnType(&yylhsminor.yy163, &yymsp[-3].minor.yy0);
+ tSetColumnType(&yylhsminor.yy183, &yymsp[-3].minor.yy0);
} else {
- yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy459; // negative value of name length
- tSetColumnType(&yylhsminor.yy163, &yymsp[-3].minor.yy0);
+ yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy325; // negative value of name length
+ tSetColumnType(&yylhsminor.yy183, &yymsp[-3].minor.yy0);
}
}
-#line 3108 "sql.c"
- yymsp[-3].minor.yy163 = yylhsminor.yy163;
+#line 3134 "sql.c"
+ yymsp[-3].minor.yy183 = yylhsminor.yy183;
break;
- case 134: /* typename ::= ids UNSIGNED */
-#line 343 "sql.y"
+ case 135: /* typename ::= ids UNSIGNED */
+#line 346 "sql.y"
{
yymsp[-1].minor.yy0.type = 0;
yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z);
- tSetColumnType (&yylhsminor.yy163, &yymsp[-1].minor.yy0);
+ tSetColumnType (&yylhsminor.yy183, &yymsp[-1].minor.yy0);
}
-#line 3118 "sql.c"
- yymsp[-1].minor.yy163 = yylhsminor.yy163;
- break;
- case 135: /* signed ::= INTEGER */
-#line 350 "sql.y"
-{ yylhsminor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3124 "sql.c"
- yymsp[0].minor.yy459 = yylhsminor.yy459;
- break;
- case 136: /* signed ::= PLUS INTEGER */
-#line 351 "sql.y"
-{ yymsp[-1].minor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
-#line 3130 "sql.c"
- break;
- case 137: /* signed ::= MINUS INTEGER */
-#line 352 "sql.y"
-{ yymsp[-1].minor.yy459 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
-#line 3135 "sql.c"
- break;
- case 141: /* cmd ::= CREATE TABLE create_table_list */
-#line 358 "sql.y"
-{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy272;}
-#line 3140 "sql.c"
- break;
- case 142: /* create_table_list ::= create_from_stable */
-#line 362 "sql.y"
+#line 3144 "sql.c"
+ yymsp[-1].minor.yy183 = yylhsminor.yy183;
+ break;
+ case 136: /* signed ::= INTEGER */
+#line 353 "sql.y"
+{ yylhsminor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3150 "sql.c"
+ yymsp[0].minor.yy325 = yylhsminor.yy325;
+ break;
+ case 137: /* signed ::= PLUS INTEGER */
+#line 354 "sql.y"
+{ yymsp[-1].minor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+#line 3156 "sql.c"
+ break;
+ case 138: /* signed ::= MINUS INTEGER */
+#line 355 "sql.y"
+{ yymsp[-1].minor.yy325 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
+#line 3161 "sql.c"
+ break;
+ case 142: /* cmd ::= CREATE TABLE create_table_list */
+#line 361 "sql.y"
+{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy438;}
+#line 3166 "sql.c"
+ break;
+ case 143: /* create_table_list ::= create_from_stable */
+#line 365 "sql.y"
{
SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql));
pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo));
- taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy96);
+ taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy152);
pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE;
- yylhsminor.yy272 = pCreateTable;
+ yylhsminor.yy438 = pCreateTable;
}
-#line 3152 "sql.c"
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+#line 3178 "sql.c"
+ yymsp[0].minor.yy438 = yylhsminor.yy438;
break;
- case 143: /* create_table_list ::= create_table_list create_from_stable */
-#line 371 "sql.y"
+ case 144: /* create_table_list ::= create_table_list create_from_stable */
+#line 374 "sql.y"
{
- taosArrayPush(yymsp[-1].minor.yy272->childTableInfo, &yymsp[0].minor.yy96);
- yylhsminor.yy272 = yymsp[-1].minor.yy272;
+ taosArrayPush(yymsp[-1].minor.yy438->childTableInfo, &yymsp[0].minor.yy152);
+ yylhsminor.yy438 = yymsp[-1].minor.yy438;
}
-#line 3161 "sql.c"
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+#line 3187 "sql.c"
+ yymsp[-1].minor.yy438 = yylhsminor.yy438;
break;
- case 144: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
-#line 377 "sql.y"
+ case 145: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+#line 380 "sql.y"
{
- yylhsminor.yy272 = tSetCreateTableInfo(yymsp[-1].minor.yy131, NULL, NULL, TSQL_CREATE_TABLE);
- setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-1].minor.yy421, NULL, NULL, TSQL_CREATE_TABLE);
+ setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0);
}
-#line 3173 "sql.c"
- yymsp[-5].minor.yy272 = yylhsminor.yy272;
+#line 3199 "sql.c"
+ yymsp[-5].minor.yy438 = yylhsminor.yy438;
break;
- case 145: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
-#line 387 "sql.y"
+ case 146: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+#line 390 "sql.y"
{
- yylhsminor.yy272 = tSetCreateTableInfo(yymsp[-5].minor.yy131, yymsp[-1].minor.yy131, NULL, TSQL_CREATE_STABLE);
- setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, TSQL_CREATE_STABLE);
+ setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
}
-#line 3185 "sql.c"
- yymsp[-9].minor.yy272 = yylhsminor.yy272;
+#line 3211 "sql.c"
+ yymsp[-9].minor.yy438 = yylhsminor.yy438;
break;
- case 146: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
-#line 398 "sql.y"
+ case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+#line 401 "sql.y"
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
- yylhsminor.yy96 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy131, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
+ yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy421, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
}
-#line 3195 "sql.c"
- yymsp[-9].minor.yy96 = yylhsminor.yy96;
+#line 3221 "sql.c"
+ yymsp[-9].minor.yy152 = yylhsminor.yy152;
break;
- case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
-#line 404 "sql.y"
+ case 148: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+#line 407 "sql.y"
{
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n;
- yylhsminor.yy96 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy131, yymsp[-1].minor.yy131, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0);
+ yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0);
}
-#line 3205 "sql.c"
- yymsp[-12].minor.yy96 = yylhsminor.yy96;
- break;
- case 148: /* tagNamelist ::= tagNamelist COMMA ids */
-#line 412 "sql.y"
-{taosArrayPush(yymsp[-2].minor.yy131, &yymsp[0].minor.yy0); yylhsminor.yy131 = yymsp[-2].minor.yy131; }
-#line 3211 "sql.c"
- yymsp[-2].minor.yy131 = yylhsminor.yy131;
- break;
- case 149: /* tagNamelist ::= ids */
-#line 413 "sql.y"
-{yylhsminor.yy131 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy131, &yymsp[0].minor.yy0);}
-#line 3217 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 150: /* create_table_args ::= ifnotexists ids cpxName AS select */
-#line 417 "sql.y"
+#line 3231 "sql.c"
+ yymsp[-12].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 149: /* tagNamelist ::= tagNamelist COMMA ids */
+#line 415 "sql.y"
+{taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy0); yylhsminor.yy421 = yymsp[-2].minor.yy421; }
+#line 3237 "sql.c"
+ yymsp[-2].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 150: /* tagNamelist ::= ids */
+#line 416 "sql.y"
+{yylhsminor.yy421 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy0);}
+#line 3243 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 151: /* create_table_args ::= ifnotexists ids cpxName AS select */
+#line 420 "sql.y"
{
- yylhsminor.yy272 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy256, TSQL_CREATE_STREAM);
- setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy438 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy56, TSQL_CREATE_STREAM);
+ setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0);
}
-#line 3229 "sql.c"
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 151: /* columnlist ::= columnlist COMMA column */
-#line 428 "sql.y"
-{taosArrayPush(yymsp[-2].minor.yy131, &yymsp[0].minor.yy163); yylhsminor.yy131 = yymsp[-2].minor.yy131; }
-#line 3235 "sql.c"
- yymsp[-2].minor.yy131 = yylhsminor.yy131;
- break;
- case 152: /* columnlist ::= column */
-#line 429 "sql.y"
-{yylhsminor.yy131 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy131, &yymsp[0].minor.yy163);}
-#line 3241 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 153: /* column ::= ids typename */
-#line 433 "sql.y"
-{
- tSetColumnInfo(&yylhsminor.yy163, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy163);
-}
-#line 3249 "sql.c"
- yymsp[-1].minor.yy163 = yylhsminor.yy163;
- break;
- case 160: /* tagitem ::= NULL */
-#line 448 "sql.y"
-{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); }
#line 3255 "sql.c"
- yymsp[0].minor.yy516 = yylhsminor.yy516;
+ yymsp[-4].minor.yy438 = yylhsminor.yy438;
break;
- case 161: /* tagitem ::= NOW */
-#line 449 "sql.y"
-{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0);}
+ case 152: /* columnlist ::= columnlist COMMA column */
+#line 431 "sql.y"
+{taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy183); yylhsminor.yy421 = yymsp[-2].minor.yy421; }
#line 3261 "sql.c"
- yymsp[0].minor.yy516 = yylhsminor.yy516;
+ yymsp[-2].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 153: /* columnlist ::= column */
+#line 432 "sql.y"
+{yylhsminor.yy421 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy183);}
+#line 3267 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
break;
- case 162: /* tagitem ::= MINUS INTEGER */
- case 163: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==163);
- case 164: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==164);
- case 165: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==165);
+ case 154: /* column ::= ids typename */
+#line 436 "sql.y"
+{
+ tSetColumnInfo(&yylhsminor.yy183, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy183);
+}
+#line 3275 "sql.c"
+ yymsp[-1].minor.yy183 = yylhsminor.yy183;
+ break;
+ case 161: /* tagitem ::= NULL */
#line 451 "sql.y"
+{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); }
+#line 3281 "sql.c"
+ yymsp[0].minor.yy430 = yylhsminor.yy430;
+ break;
+ case 162: /* tagitem ::= NOW */
+#line 452 "sql.y"
+{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0);}
+#line 3287 "sql.c"
+ yymsp[0].minor.yy430 = yylhsminor.yy430;
+ break;
+ case 163: /* tagitem ::= MINUS INTEGER */
+ case 164: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==164);
+ case 165: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==165);
+ case 166: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==166);
+#line 454 "sql.y"
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type;
toTSDBType(yymsp[-1].minor.yy0.type);
- tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0);
+ tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0);
}
-#line 3275 "sql.c"
- yymsp[-1].minor.yy516 = yylhsminor.yy516;
+#line 3301 "sql.c"
+ yymsp[-1].minor.yy430 = yylhsminor.yy430;
break;
- case 166: /* select ::= SELECT selcollist from where_opt interval_opt sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
-#line 482 "sql.y"
+ case 167: /* select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
+#line 485 "sql.y"
{
- yylhsminor.yy256 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy131, yymsp[-11].minor.yy544, yymsp[-10].minor.yy46, yymsp[-4].minor.yy131, yymsp[-2].minor.yy131, &yymsp[-9].minor.yy530, &yymsp[-7].minor.yy39, &yymsp[-6].minor.yy538, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy131, &yymsp[0].minor.yy284, &yymsp[-1].minor.yy284, yymsp[-3].minor.yy46);
+ yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy421, yymsp[-11].minor.yy8, yymsp[-10].minor.yy439, yymsp[-4].minor.yy421, yymsp[-2].minor.yy421, &yymsp[-9].minor.yy400, &yymsp[-7].minor.yy147, &yymsp[-6].minor.yy40, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, &yymsp[0].minor.yy166, &yymsp[-1].minor.yy166, yymsp[-3].minor.yy439);
}
-#line 3283 "sql.c"
- yymsp[-13].minor.yy256 = yylhsminor.yy256;
- break;
- case 167: /* select ::= LP select RP */
-#line 486 "sql.y"
-{yymsp[-2].minor.yy256 = yymsp[-1].minor.yy256;}
-#line 3289 "sql.c"
- break;
- case 168: /* union ::= select */
-#line 490 "sql.y"
-{ yylhsminor.yy131 = setSubclause(NULL, yymsp[0].minor.yy256); }
-#line 3294 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 169: /* union ::= union UNION ALL select */
-#line 491 "sql.y"
-{ yylhsminor.yy131 = appendSelectClause(yymsp[-3].minor.yy131, yymsp[0].minor.yy256); }
-#line 3300 "sql.c"
- yymsp[-3].minor.yy131 = yylhsminor.yy131;
- break;
- case 170: /* cmd ::= union */
+#line 3309 "sql.c"
+ yymsp[-13].minor.yy56 = yylhsminor.yy56;
+ break;
+ case 168: /* select ::= LP select RP */
+#line 489 "sql.y"
+{yymsp[-2].minor.yy56 = yymsp[-1].minor.yy56;}
+#line 3315 "sql.c"
+ break;
+ case 169: /* union ::= select */
#line 493 "sql.y"
-{ setSqlInfo(pInfo, yymsp[0].minor.yy131, NULL, TSDB_SQL_SELECT); }
-#line 3306 "sql.c"
+{ yylhsminor.yy421 = setSubclause(NULL, yymsp[0].minor.yy56); }
+#line 3320 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
break;
- case 171: /* select ::= SELECT selcollist */
-#line 500 "sql.y"
-{
- yylhsminor.yy256 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy131, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-#line 3313 "sql.c"
- yymsp[-1].minor.yy256 = yylhsminor.yy256;
- break;
- case 172: /* sclp ::= selcollist COMMA */
-#line 512 "sql.y"
-{yylhsminor.yy131 = yymsp[-1].minor.yy131;}
-#line 3319 "sql.c"
- yymsp[-1].minor.yy131 = yylhsminor.yy131;
- break;
- case 173: /* sclp ::= */
- case 203: /* orderby_opt ::= */ yytestcase(yyruleno==203);
-#line 513 "sql.y"
-{yymsp[1].minor.yy131 = 0;}
+ case 170: /* union ::= union UNION ALL select */
+#line 494 "sql.y"
+{ yylhsminor.yy421 = appendSelectClause(yymsp[-3].minor.yy421, yymsp[0].minor.yy56); }
#line 3326 "sql.c"
+ yymsp[-3].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 171: /* cmd ::= union */
+#line 496 "sql.y"
+{ setSqlInfo(pInfo, yymsp[0].minor.yy421, NULL, TSDB_SQL_SELECT); }
+#line 3332 "sql.c"
break;
- case 174: /* selcollist ::= sclp distinct expr as */
-#line 514 "sql.y"
+ case 172: /* select ::= SELECT selcollist */
+#line 503 "sql.y"
{
- yylhsminor.yy131 = tSqlExprListAppend(yymsp[-3].minor.yy131, yymsp[-1].minor.yy46, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
+ yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy421, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
-#line 3333 "sql.c"
- yymsp[-3].minor.yy131 = yylhsminor.yy131;
+#line 3339 "sql.c"
+ yymsp[-1].minor.yy56 = yylhsminor.yy56;
+ break;
+ case 173: /* sclp ::= selcollist COMMA */
+#line 515 "sql.y"
+{yylhsminor.yy421 = yymsp[-1].minor.yy421;}
+#line 3345 "sql.c"
+ yymsp[-1].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 174: /* sclp ::= */
+ case 206: /* orderby_opt ::= */ yytestcase(yyruleno==206);
+#line 516 "sql.y"
+{yymsp[1].minor.yy421 = 0;}
+#line 3352 "sql.c"
+ break;
+ case 175: /* selcollist ::= sclp distinct expr as */
+#line 517 "sql.y"
+{
+ yylhsminor.yy421 = tSqlExprListAppend(yymsp[-3].minor.yy421, yymsp[-1].minor.yy439, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
+}
+#line 3359 "sql.c"
+ yymsp[-3].minor.yy421 = yylhsminor.yy421;
break;
- case 175: /* selcollist ::= sclp STAR */
-#line 518 "sql.y"
+ case 176: /* selcollist ::= sclp STAR */
+#line 521 "sql.y"
{
tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL);
- yylhsminor.yy131 = tSqlExprListAppend(yymsp[-1].minor.yy131, pNode, 0, 0);
+ yylhsminor.yy421 = tSqlExprListAppend(yymsp[-1].minor.yy421, pNode, 0, 0);
}
-#line 3342 "sql.c"
- yymsp[-1].minor.yy131 = yylhsminor.yy131;
+#line 3368 "sql.c"
+ yymsp[-1].minor.yy421 = yylhsminor.yy421;
break;
- case 176: /* as ::= AS ids */
-#line 526 "sql.y"
+ case 177: /* as ::= AS ids */
+#line 529 "sql.y"
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
-#line 3348 "sql.c"
+#line 3374 "sql.c"
break;
- case 177: /* as ::= ids */
-#line 527 "sql.y"
+ case 178: /* as ::= ids */
+#line 530 "sql.y"
{ yylhsminor.yy0 = yymsp[0].minor.yy0; }
-#line 3353 "sql.c"
+#line 3379 "sql.c"
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 178: /* as ::= */
-#line 528 "sql.y"
+ case 179: /* as ::= */
+#line 531 "sql.y"
{ yymsp[1].minor.yy0.n = 0; }
-#line 3359 "sql.c"
+#line 3385 "sql.c"
break;
- case 179: /* distinct ::= DISTINCT */
-#line 531 "sql.y"
+ case 180: /* distinct ::= DISTINCT */
+#line 534 "sql.y"
{ yylhsminor.yy0 = yymsp[0].minor.yy0; }
-#line 3364 "sql.c"
+#line 3390 "sql.c"
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 181: /* from ::= FROM tablelist */
- case 182: /* from ::= FROM sub */ yytestcase(yyruleno==182);
-#line 537 "sql.y"
-{yymsp[-1].minor.yy544 = yymsp[0].minor.yy544;}
-#line 3371 "sql.c"
- break;
- case 183: /* sub ::= LP union RP */
-#line 542 "sql.y"
-{yymsp[-2].minor.yy544 = addSubqueryElem(NULL, yymsp[-1].minor.yy131, NULL);}
-#line 3376 "sql.c"
- break;
- case 184: /* sub ::= LP union RP ids */
-#line 543 "sql.y"
-{yymsp[-3].minor.yy544 = addSubqueryElem(NULL, yymsp[-2].minor.yy131, &yymsp[0].minor.yy0);}
-#line 3381 "sql.c"
- break;
- case 185: /* sub ::= sub COMMA LP union RP ids */
-#line 544 "sql.y"
-{yylhsminor.yy544 = addSubqueryElem(yymsp[-5].minor.yy544, yymsp[-2].minor.yy131, &yymsp[0].minor.yy0);}
-#line 3386 "sql.c"
- yymsp[-5].minor.yy544 = yylhsminor.yy544;
- break;
- case 186: /* tablelist ::= ids cpxName */
-#line 548 "sql.y"
+ case 182: /* from ::= FROM tablelist */
+ case 183: /* from ::= FROM sub */ yytestcase(yyruleno==183);
+#line 540 "sql.y"
+{yymsp[-1].minor.yy8 = yymsp[0].minor.yy8;}
+#line 3397 "sql.c"
+ break;
+ case 184: /* sub ::= LP union RP */
+#line 545 "sql.y"
+{yymsp[-2].minor.yy8 = addSubqueryElem(NULL, yymsp[-1].minor.yy421, NULL);}
+#line 3402 "sql.c"
+ break;
+ case 185: /* sub ::= LP union RP ids */
+#line 546 "sql.y"
+{yymsp[-3].minor.yy8 = addSubqueryElem(NULL, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);}
+#line 3407 "sql.c"
+ break;
+ case 186: /* sub ::= sub COMMA LP union RP ids */
+#line 547 "sql.y"
+{yylhsminor.yy8 = addSubqueryElem(yymsp[-5].minor.yy8, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);}
+#line 3412 "sql.c"
+ yymsp[-5].minor.yy8 = yylhsminor.yy8;
+ break;
+ case 187: /* tablelist ::= ids cpxName */
+#line 551 "sql.y"
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- yylhsminor.yy544 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL);
+ yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL);
}
-#line 3395 "sql.c"
- yymsp[-1].minor.yy544 = yylhsminor.yy544;
+#line 3421 "sql.c"
+ yymsp[-1].minor.yy8 = yylhsminor.yy8;
break;
- case 187: /* tablelist ::= ids cpxName ids */
-#line 553 "sql.y"
+ case 188: /* tablelist ::= ids cpxName ids */
+#line 556 "sql.y"
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
- yylhsminor.yy544 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+ yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
-#line 3404 "sql.c"
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+#line 3430 "sql.c"
+ yymsp[-2].minor.yy8 = yylhsminor.yy8;
break;
- case 188: /* tablelist ::= tablelist COMMA ids cpxName */
-#line 558 "sql.y"
+ case 189: /* tablelist ::= tablelist COMMA ids cpxName */
+#line 561 "sql.y"
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- yylhsminor.yy544 = setTableNameList(yymsp[-3].minor.yy544, &yymsp[-1].minor.yy0, NULL);
+ yylhsminor.yy8 = setTableNameList(yymsp[-3].minor.yy8, &yymsp[-1].minor.yy0, NULL);
}
-#line 3413 "sql.c"
- yymsp[-3].minor.yy544 = yylhsminor.yy544;
+#line 3439 "sql.c"
+ yymsp[-3].minor.yy8 = yylhsminor.yy8;
break;
- case 189: /* tablelist ::= tablelist COMMA ids cpxName ids */
-#line 563 "sql.y"
+ case 190: /* tablelist ::= tablelist COMMA ids cpxName ids */
+#line 566 "sql.y"
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
- yylhsminor.yy544 = setTableNameList(yymsp[-4].minor.yy544, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+ yylhsminor.yy8 = setTableNameList(yymsp[-4].minor.yy8, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
-#line 3422 "sql.c"
- yymsp[-4].minor.yy544 = yylhsminor.yy544;
+#line 3448 "sql.c"
+ yymsp[-4].minor.yy8 = yylhsminor.yy8;
break;
- case 190: /* tmvar ::= VARIABLE */
-#line 570 "sql.y"
+ case 191: /* tmvar ::= VARIABLE */
+#line 573 "sql.y"
{yylhsminor.yy0 = yymsp[0].minor.yy0;}
-#line 3428 "sql.c"
+#line 3454 "sql.c"
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
- case 191: /* interval_opt ::= INTERVAL LP tmvar RP */
-#line 573 "sql.y"
-{yymsp[-3].minor.yy530.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy530.offset.n = 0;}
-#line 3434 "sql.c"
- break;
- case 192: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
-#line 574 "sql.y"
-{yymsp[-5].minor.yy530.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy530.offset = yymsp[-1].minor.yy0;}
-#line 3439 "sql.c"
+ case 192: /* interval_option ::= intervalKey LP tmvar RP */
+#line 576 "sql.y"
+{yylhsminor.yy400.interval = yymsp[-1].minor.yy0; yylhsminor.yy400.offset.n = 0; yylhsminor.yy400.token = yymsp[-3].minor.yy104;}
+#line 3460 "sql.c"
+ yymsp[-3].minor.yy400 = yylhsminor.yy400;
break;
- case 193: /* interval_opt ::= */
-#line 575 "sql.y"
-{memset(&yymsp[1].minor.yy530, 0, sizeof(yymsp[1].minor.yy530));}
-#line 3444 "sql.c"
+ case 193: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
+#line 577 "sql.y"
+{yylhsminor.yy400.interval = yymsp[-3].minor.yy0; yylhsminor.yy400.offset = yymsp[-1].minor.yy0; yylhsminor.yy400.token = yymsp[-5].minor.yy104;}
+#line 3466 "sql.c"
+ yymsp[-5].minor.yy400 = yylhsminor.yy400;
break;
- case 194: /* session_option ::= */
+ case 194: /* interval_option ::= */
#line 578 "sql.y"
-{yymsp[1].minor.yy39.col.n = 0; yymsp[1].minor.yy39.gap.n = 0;}
-#line 3449 "sql.c"
+{memset(&yymsp[1].minor.yy400, 0, sizeof(yymsp[1].minor.yy400));}
+#line 3472 "sql.c"
+ break;
+ case 195: /* intervalKey ::= INTERVAL */
+#line 581 "sql.y"
+{yymsp[0].minor.yy104 = TK_INTERVAL;}
+#line 3477 "sql.c"
+ break;
+ case 196: /* intervalKey ::= EVERY */
+#line 582 "sql.y"
+{yymsp[0].minor.yy104 = TK_EVERY; }
+#line 3482 "sql.c"
break;
- case 195: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
-#line 579 "sql.y"
+ case 197: /* session_option ::= */
+#line 585 "sql.y"
+{yymsp[1].minor.yy147.col.n = 0; yymsp[1].minor.yy147.gap.n = 0;}
+#line 3487 "sql.c"
+ break;
+ case 198: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+#line 586 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- yymsp[-6].minor.yy39.col = yymsp[-4].minor.yy0;
- yymsp[-6].minor.yy39.gap = yymsp[-1].minor.yy0;
+ yymsp[-6].minor.yy147.col = yymsp[-4].minor.yy0;
+ yymsp[-6].minor.yy147.gap = yymsp[-1].minor.yy0;
}
-#line 3458 "sql.c"
+#line 3496 "sql.c"
break;
- case 196: /* windowstate_option ::= */
-#line 585 "sql.y"
-{ yymsp[1].minor.yy538.col.n = 0; yymsp[1].minor.yy538.col.z = NULL;}
-#line 3463 "sql.c"
+ case 199: /* windowstate_option ::= */
+#line 593 "sql.y"
+{ yymsp[1].minor.yy40.col.n = 0; yymsp[1].minor.yy40.col.z = NULL;}
+#line 3501 "sql.c"
break;
- case 197: /* windowstate_option ::= STATE_WINDOW LP ids RP */
-#line 586 "sql.y"
-{ yymsp[-3].minor.yy538.col = yymsp[-1].minor.yy0; }
-#line 3468 "sql.c"
+ case 200: /* windowstate_option ::= STATE_WINDOW LP ids RP */
+#line 594 "sql.y"
+{ yymsp[-3].minor.yy40.col = yymsp[-1].minor.yy0; }
+#line 3506 "sql.c"
break;
- case 198: /* fill_opt ::= */
-#line 590 "sql.y"
-{ yymsp[1].minor.yy131 = 0; }
-#line 3473 "sql.c"
+ case 201: /* fill_opt ::= */
+#line 598 "sql.y"
+{ yymsp[1].minor.yy421 = 0; }
+#line 3511 "sql.c"
break;
- case 199: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
-#line 591 "sql.y"
+ case 202: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+#line 599 "sql.y"
{
tVariant A = {0};
toTSDBType(yymsp[-3].minor.yy0.type);
tVariantCreate(&A, &yymsp[-3].minor.yy0);
- tVariantListInsert(yymsp[-1].minor.yy131, &A, -1, 0);
- yymsp[-5].minor.yy131 = yymsp[-1].minor.yy131;
+ tVariantListInsert(yymsp[-1].minor.yy421, &A, -1, 0);
+ yymsp[-5].minor.yy421 = yymsp[-1].minor.yy421;
}
-#line 3485 "sql.c"
+#line 3523 "sql.c"
break;
- case 200: /* fill_opt ::= FILL LP ID RP */
-#line 600 "sql.y"
+ case 203: /* fill_opt ::= FILL LP ID RP */
+#line 608 "sql.y"
{
toTSDBType(yymsp[-1].minor.yy0.type);
- yymsp[-3].minor.yy131 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
+ yymsp[-3].minor.yy421 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
}
-#line 3493 "sql.c"
+#line 3531 "sql.c"
break;
- case 201: /* sliding_opt ::= SLIDING LP tmvar RP */
-#line 606 "sql.y"
+ case 204: /* sliding_opt ::= SLIDING LP tmvar RP */
+#line 614 "sql.y"
{yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; }
-#line 3498 "sql.c"
+#line 3536 "sql.c"
break;
- case 202: /* sliding_opt ::= */
-#line 607 "sql.y"
+ case 205: /* sliding_opt ::= */
+#line 615 "sql.y"
{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; }
-#line 3503 "sql.c"
+#line 3541 "sql.c"
break;
- case 204: /* orderby_opt ::= ORDER BY sortlist */
-#line 619 "sql.y"
-{yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;}
-#line 3508 "sql.c"
+ case 207: /* orderby_opt ::= ORDER BY sortlist */
+#line 627 "sql.y"
+{yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;}
+#line 3546 "sql.c"
break;
- case 205: /* sortlist ::= sortlist COMMA item sortorder */
-#line 621 "sql.y"
+ case 208: /* sortlist ::= sortlist COMMA item sortorder */
+#line 629 "sql.y"
{
- yylhsminor.yy131 = tVariantListAppend(yymsp[-3].minor.yy131, &yymsp[-1].minor.yy516, yymsp[0].minor.yy43);
+ yylhsminor.yy421 = tVariantListAppend(yymsp[-3].minor.yy421, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96);
}
-#line 3515 "sql.c"
- yymsp[-3].minor.yy131 = yylhsminor.yy131;
+#line 3553 "sql.c"
+ yymsp[-3].minor.yy421 = yylhsminor.yy421;
break;
- case 206: /* sortlist ::= item sortorder */
-#line 625 "sql.y"
+ case 209: /* sortlist ::= item sortorder */
+#line 633 "sql.y"
{
- yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[-1].minor.yy516, yymsp[0].minor.yy43);
+ yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96);
}
-#line 3523 "sql.c"
- yymsp[-1].minor.yy131 = yylhsminor.yy131;
+#line 3561 "sql.c"
+ yymsp[-1].minor.yy421 = yylhsminor.yy421;
break;
- case 207: /* item ::= ids cpxName */
-#line 630 "sql.y"
+ case 210: /* item ::= ids cpxName */
+#line 638 "sql.y"
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0);
+ tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0);
}
-#line 3534 "sql.c"
- yymsp[-1].minor.yy516 = yylhsminor.yy516;
+#line 3572 "sql.c"
+ yymsp[-1].minor.yy430 = yylhsminor.yy430;
break;
- case 208: /* sortorder ::= ASC */
-#line 638 "sql.y"
-{ yymsp[0].minor.yy43 = TSDB_ORDER_ASC; }
-#line 3540 "sql.c"
+ case 211: /* sortorder ::= ASC */
+#line 646 "sql.y"
+{ yymsp[0].minor.yy96 = TSDB_ORDER_ASC; }
+#line 3578 "sql.c"
break;
- case 209: /* sortorder ::= DESC */
-#line 639 "sql.y"
-{ yymsp[0].minor.yy43 = TSDB_ORDER_DESC;}
-#line 3545 "sql.c"
- break;
- case 210: /* sortorder ::= */
-#line 640 "sql.y"
-{ yymsp[1].minor.yy43 = TSDB_ORDER_ASC; }
-#line 3550 "sql.c"
+ case 212: /* sortorder ::= DESC */
+#line 647 "sql.y"
+{ yymsp[0].minor.yy96 = TSDB_ORDER_DESC;}
+#line 3583 "sql.c"
break;
- case 211: /* groupby_opt ::= */
+ case 213: /* sortorder ::= */
#line 648 "sql.y"
-{ yymsp[1].minor.yy131 = 0;}
-#line 3555 "sql.c"
+{ yymsp[1].minor.yy96 = TSDB_ORDER_ASC; }
+#line 3588 "sql.c"
break;
- case 212: /* groupby_opt ::= GROUP BY grouplist */
-#line 649 "sql.y"
-{ yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;}
-#line 3560 "sql.c"
+ case 214: /* groupby_opt ::= */
+#line 656 "sql.y"
+{ yymsp[1].minor.yy421 = 0;}
+#line 3593 "sql.c"
break;
- case 213: /* grouplist ::= grouplist COMMA item */
-#line 651 "sql.y"
+ case 215: /* groupby_opt ::= GROUP BY grouplist */
+#line 657 "sql.y"
+{ yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;}
+#line 3598 "sql.c"
+ break;
+ case 216: /* grouplist ::= grouplist COMMA item */
+#line 659 "sql.y"
{
- yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1);
+ yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1);
}
-#line 3567 "sql.c"
- yymsp[-2].minor.yy131 = yylhsminor.yy131;
+#line 3605 "sql.c"
+ yymsp[-2].minor.yy421 = yylhsminor.yy421;
break;
- case 214: /* grouplist ::= item */
-#line 655 "sql.y"
+ case 217: /* grouplist ::= item */
+#line 663 "sql.y"
{
- yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1);
+ yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1);
}
-#line 3575 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 215: /* having_opt ::= */
- case 225: /* where_opt ::= */ yytestcase(yyruleno==225);
- case 267: /* expritem ::= */ yytestcase(yyruleno==267);
-#line 662 "sql.y"
-{yymsp[1].minor.yy46 = 0;}
-#line 3583 "sql.c"
+#line 3613 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
break;
- case 216: /* having_opt ::= HAVING expr */
- case 226: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==226);
-#line 663 "sql.y"
-{yymsp[-1].minor.yy46 = yymsp[0].minor.yy46;}
-#line 3589 "sql.c"
- break;
- case 217: /* limit_opt ::= */
- case 221: /* slimit_opt ::= */ yytestcase(yyruleno==221);
-#line 667 "sql.y"
-{yymsp[1].minor.yy284.limit = -1; yymsp[1].minor.yy284.offset = 0;}
-#line 3595 "sql.c"
- break;
- case 218: /* limit_opt ::= LIMIT signed */
- case 222: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==222);
-#line 668 "sql.y"
-{yymsp[-1].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-1].minor.yy284.offset = 0;}
-#line 3601 "sql.c"
- break;
- case 219: /* limit_opt ::= LIMIT signed OFFSET signed */
+ case 218: /* having_opt ::= */
+ case 228: /* where_opt ::= */ yytestcase(yyruleno==228);
+ case 272: /* expritem ::= */ yytestcase(yyruleno==272);
#line 670 "sql.y"
-{ yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;}
-#line 3606 "sql.c"
- break;
- case 220: /* limit_opt ::= LIMIT signed COMMA signed */
-#line 672 "sql.y"
-{ yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;}
-#line 3611 "sql.c"
+{yymsp[1].minor.yy439 = 0;}
+#line 3621 "sql.c"
break;
- case 223: /* slimit_opt ::= SLIMIT signed SOFFSET signed */
+ case 219: /* having_opt ::= HAVING expr */
+ case 229: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==229);
+#line 671 "sql.y"
+{yymsp[-1].minor.yy439 = yymsp[0].minor.yy439;}
+#line 3627 "sql.c"
+ break;
+ case 220: /* limit_opt ::= */
+ case 224: /* slimit_opt ::= */ yytestcase(yyruleno==224);
+#line 675 "sql.y"
+{yymsp[1].minor.yy166.limit = -1; yymsp[1].minor.yy166.offset = 0;}
+#line 3633 "sql.c"
+ break;
+ case 221: /* limit_opt ::= LIMIT signed */
+ case 225: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==225);
+#line 676 "sql.y"
+{yymsp[-1].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-1].minor.yy166.offset = 0;}
+#line 3639 "sql.c"
+ break;
+ case 222: /* limit_opt ::= LIMIT signed OFFSET signed */
#line 678 "sql.y"
-{yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;}
-#line 3616 "sql.c"
+{ yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;}
+#line 3644 "sql.c"
break;
- case 224: /* slimit_opt ::= SLIMIT signed COMMA signed */
+ case 223: /* limit_opt ::= LIMIT signed COMMA signed */
#line 680 "sql.y"
-{yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;}
-#line 3621 "sql.c"
- break;
- case 227: /* expr ::= LP expr RP */
-#line 693 "sql.y"
-{yylhsminor.yy46 = yymsp[-1].minor.yy46; yylhsminor.yy46->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy46->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
-#line 3626 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 228: /* expr ::= ID */
-#line 695 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);}
-#line 3632 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
- break;
- case 229: /* expr ::= ID DOT ID */
-#line 696 "sql.y"
-{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);}
-#line 3638 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 230: /* expr ::= ID DOT STAR */
-#line 697 "sql.y"
-{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);}
-#line 3644 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 231: /* expr ::= INTEGER */
-#line 699 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);}
-#line 3650 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
- break;
- case 232: /* expr ::= MINUS INTEGER */
- case 233: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==233);
-#line 700 "sql.y"
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);}
-#line 3657 "sql.c"
- yymsp[-1].minor.yy46 = yylhsminor.yy46;
- break;
- case 234: /* expr ::= FLOAT */
-#line 702 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);}
-#line 3663 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
- break;
- case 235: /* expr ::= MINUS FLOAT */
- case 236: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==236);
+{ yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;}
+#line 3649 "sql.c"
+ break;
+ case 226: /* slimit_opt ::= SLIMIT signed SOFFSET signed */
+#line 686 "sql.y"
+{yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;}
+#line 3654 "sql.c"
+ break;
+ case 227: /* slimit_opt ::= SLIMIT signed COMMA signed */
+#line 688 "sql.y"
+{yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;}
+#line 3659 "sql.c"
+ break;
+ case 230: /* expr ::= LP expr RP */
+#line 701 "sql.y"
+{yylhsminor.yy439 = yymsp[-1].minor.yy439; yylhsminor.yy439->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy439->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
+#line 3664 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 231: /* expr ::= ID */
#line 703 "sql.y"
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);}
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);}
#line 3670 "sql.c"
- yymsp[-1].minor.yy46 = yylhsminor.yy46;
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
break;
- case 237: /* expr ::= STRING */
-#line 705 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);}
+ case 232: /* expr ::= ID DOT ID */
+#line 704 "sql.y"
+{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);}
#line 3676 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 238: /* expr ::= NOW */
-#line 706 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); }
+ case 233: /* expr ::= ID DOT STAR */
+#line 705 "sql.y"
+{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);}
#line 3682 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 239: /* expr ::= VARIABLE */
+ case 234: /* expr ::= INTEGER */
#line 707 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);}
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);}
#line 3688 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
break;
- case 240: /* expr ::= PLUS VARIABLE */
- case 241: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==241);
+ case 235: /* expr ::= MINUS INTEGER */
+ case 236: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==236);
#line 708 "sql.y"
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);}
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);}
#line 3695 "sql.c"
- yymsp[-1].minor.yy46 = yylhsminor.yy46;
+ yymsp[-1].minor.yy439 = yylhsminor.yy439;
break;
- case 242: /* expr ::= BOOL */
+ case 237: /* expr ::= FLOAT */
#line 710 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);}
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);}
#line 3701 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
break;
- case 243: /* expr ::= NULL */
+ case 238: /* expr ::= MINUS FLOAT */
+ case 239: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==239);
#line 711 "sql.y"
-{ yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);}
-#line 3707 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);}
+#line 3708 "sql.c"
+ yymsp[-1].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 240: /* expr ::= STRING */
+#line 713 "sql.y"
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);}
+#line 3714 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
break;
- case 244: /* expr ::= ID LP exprlist RP */
+ case 241: /* expr ::= NOW */
#line 714 "sql.y"
-{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy46 = tSqlExprCreateFunction(yymsp[-1].minor.yy131, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
-#line 3713 "sql.c"
- yymsp[-3].minor.yy46 = yylhsminor.yy46;
- break;
- case 245: /* expr ::= ID LP STAR RP */
-#line 717 "sql.y"
-{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy46 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
-#line 3719 "sql.c"
- yymsp[-3].minor.yy46 = yylhsminor.yy46;
- break;
- case 246: /* expr ::= expr IS NULL */
-#line 720 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, NULL, TK_ISNULL);}
-#line 3725 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 247: /* expr ::= expr IS NOT NULL */
-#line 721 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-3].minor.yy46, NULL, TK_NOTNULL);}
-#line 3731 "sql.c"
- yymsp[-3].minor.yy46 = yylhsminor.yy46;
- break;
- case 248: /* expr ::= expr LT expr */
-#line 724 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LT);}
-#line 3737 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 249: /* expr ::= expr GT expr */
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); }
+#line 3720 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 242: /* expr ::= VARIABLE */
+#line 715 "sql.y"
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);}
+#line 3726 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 243: /* expr ::= PLUS VARIABLE */
+ case 244: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==244);
+#line 716 "sql.y"
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);}
+#line 3733 "sql.c"
+ yymsp[-1].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 245: /* expr ::= BOOL */
+#line 718 "sql.y"
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);}
+#line 3739 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 246: /* expr ::= NULL */
+#line 719 "sql.y"
+{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);}
+#line 3745 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 247: /* expr ::= ID LP exprlist RP */
+#line 722 "sql.y"
+{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(yymsp[-1].minor.yy421, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
+#line 3751 "sql.c"
+ yymsp[-3].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 248: /* expr ::= ID LP STAR RP */
#line 725 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_GT);}
-#line 3743 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 250: /* expr ::= expr LE expr */
-#line 726 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LE);}
-#line 3749 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 251: /* expr ::= expr GE expr */
-#line 727 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_GE);}
-#line 3755 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 252: /* expr ::= expr NE expr */
+{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
+#line 3757 "sql.c"
+ yymsp[-3].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 249: /* expr ::= expr IS NULL */
#line 728 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_NE);}
-#line 3761 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, NULL, TK_ISNULL);}
+#line 3763 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 253: /* expr ::= expr EQ expr */
+ case 250: /* expr ::= expr IS NOT NULL */
#line 729 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_EQ);}
-#line 3767 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-3].minor.yy439, NULL, TK_NOTNULL);}
+#line 3769 "sql.c"
+ yymsp[-3].minor.yy439 = yylhsminor.yy439;
break;
- case 254: /* expr ::= expr BETWEEN expr AND expr */
-#line 731 "sql.y"
-{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy46); yylhsminor.yy46 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy46, yymsp[-2].minor.yy46, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy46, TK_LE), TK_AND);}
-#line 3773 "sql.c"
- yymsp[-4].minor.yy46 = yylhsminor.yy46;
+ case 251: /* expr ::= expr LT expr */
+#line 732 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LT);}
+#line 3775 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 255: /* expr ::= expr AND expr */
+ case 252: /* expr ::= expr GT expr */
#line 733 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_AND);}
-#line 3779 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GT);}
+#line 3781 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 256: /* expr ::= expr OR expr */
+ case 253: /* expr ::= expr LE expr */
#line 734 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_OR); }
-#line 3785 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 257: /* expr ::= expr PLUS expr */
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LE);}
+#line 3787 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 254: /* expr ::= expr GE expr */
+#line 735 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GE);}
+#line 3793 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 255: /* expr ::= expr NE expr */
+#line 736 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NE);}
+#line 3799 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 256: /* expr ::= expr EQ expr */
#line 737 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_PLUS); }
-#line 3791 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 258: /* expr ::= expr MINUS expr */
-#line 738 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_MINUS); }
-#line 3797 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_EQ);}
+#line 3805 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 259: /* expr ::= expr STAR expr */
+ case 257: /* expr ::= expr BETWEEN expr AND expr */
#line 739 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_STAR); }
-#line 3803 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 260: /* expr ::= expr SLASH expr */
-#line 740 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_DIVIDE);}
-#line 3809 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
+{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy439); yylhsminor.yy439 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy439, yymsp[-2].minor.yy439, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy439, TK_LE), TK_AND);}
+#line 3811 "sql.c"
+ yymsp[-4].minor.yy439 = yylhsminor.yy439;
break;
- case 261: /* expr ::= expr REM expr */
+ case 258: /* expr ::= expr AND expr */
#line 741 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_REM); }
-#line 3815 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 262: /* expr ::= expr LIKE expr */
-#line 744 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LIKE); }
-#line 3821 "sql.c"
- yymsp[-2].minor.yy46 = yylhsminor.yy46;
- break;
- case 263: /* expr ::= expr IN LP exprlist RP */
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_AND);}
+#line 3817 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 259: /* expr ::= expr OR expr */
+#line 742 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_OR); }
+#line 3823 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 260: /* expr ::= expr PLUS expr */
+#line 745 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_PLUS); }
+#line 3829 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 261: /* expr ::= expr MINUS expr */
+#line 746 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MINUS); }
+#line 3835 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 262: /* expr ::= expr STAR expr */
#line 747 "sql.y"
-{yylhsminor.yy46 = tSqlExprCreate(yymsp[-4].minor.yy46, (tSqlExpr*)yymsp[-1].minor.yy131, TK_IN); }
-#line 3827 "sql.c"
- yymsp[-4].minor.yy46 = yylhsminor.yy46;
- break;
- case 264: /* exprlist ::= exprlist COMMA expritem */
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_STAR); }
+#line 3841 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 263: /* expr ::= expr SLASH expr */
+#line 748 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_DIVIDE);}
+#line 3847 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 264: /* expr ::= expr REM expr */
+#line 749 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_REM); }
+#line 3853 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 265: /* expr ::= expr LIKE expr */
+#line 752 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LIKE); }
+#line 3859 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 266: /* expr ::= expr MATCH expr */
#line 755 "sql.y"
-{yylhsminor.yy131 = tSqlExprListAppend(yymsp[-2].minor.yy131,yymsp[0].minor.yy46,0, 0);}
-#line 3833 "sql.c"
- yymsp[-2].minor.yy131 = yylhsminor.yy131;
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MATCH); }
+#line 3865 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
break;
- case 265: /* exprlist ::= expritem */
+ case 267: /* expr ::= expr NMATCH expr */
#line 756 "sql.y"
-{yylhsminor.yy131 = tSqlExprListAppend(0,yymsp[0].minor.yy46,0, 0);}
-#line 3839 "sql.c"
- yymsp[0].minor.yy131 = yylhsminor.yy131;
- break;
- case 266: /* expritem ::= expr */
-#line 757 "sql.y"
-{yylhsminor.yy46 = yymsp[0].minor.yy46;}
-#line 3845 "sql.c"
- yymsp[0].minor.yy46 = yylhsminor.yy46;
- break;
- case 268: /* cmd ::= RESET QUERY CACHE */
-#line 761 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NMATCH); }
+#line 3871 "sql.c"
+ yymsp[-2].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 268: /* expr ::= expr IN LP exprlist RP */
+#line 759 "sql.y"
+{yylhsminor.yy439 = tSqlExprCreate(yymsp[-4].minor.yy439, (tSqlExpr*)yymsp[-1].minor.yy421, TK_IN); }
+#line 3877 "sql.c"
+ yymsp[-4].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 269: /* exprlist ::= exprlist COMMA expritem */
+#line 767 "sql.y"
+{yylhsminor.yy421 = tSqlExprListAppend(yymsp[-2].minor.yy421,yymsp[0].minor.yy439,0, 0);}
+#line 3883 "sql.c"
+ yymsp[-2].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 270: /* exprlist ::= expritem */
+#line 768 "sql.y"
+{yylhsminor.yy421 = tSqlExprListAppend(0,yymsp[0].minor.yy439,0, 0);}
+#line 3889 "sql.c"
+ yymsp[0].minor.yy421 = yylhsminor.yy421;
+ break;
+ case 271: /* expritem ::= expr */
+#line 769 "sql.y"
+{yylhsminor.yy439 = yymsp[0].minor.yy439;}
+#line 3895 "sql.c"
+ yymsp[0].minor.yy439 = yylhsminor.yy439;
+ break;
+ case 273: /* cmd ::= RESET QUERY CACHE */
+#line 773 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
-#line 3851 "sql.c"
+#line 3901 "sql.c"
break;
- case 269: /* cmd ::= SYNCDB ids REPLICA */
-#line 764 "sql.y"
+ case 274: /* cmd ::= SYNCDB ids REPLICA */
+#line 776 "sql.y"
{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);}
-#line 3856 "sql.c"
+#line 3906 "sql.c"
break;
- case 270: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
-#line 767 "sql.y"
+ case 275: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+#line 779 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3865 "sql.c"
+#line 3915 "sql.c"
break;
- case 271: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
-#line 773 "sql.y"
+ case 276: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+#line 785 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3874,28 +3924,28 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3878 "sql.c"
+#line 3928 "sql.c"
break;
- case 272: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
-#line 783 "sql.y"
+ case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+#line 795 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3887 "sql.c"
+#line 3937 "sql.c"
break;
- case 273: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
-#line 790 "sql.y"
+ case 278: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+#line 802 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3896 "sql.c"
+#line 3946 "sql.c"
break;
- case 274: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
-#line 795 "sql.y"
+ case 279: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+#line 807 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3905,10 +3955,10 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3909 "sql.c"
+#line 3959 "sql.c"
break;
- case 275: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
-#line 805 "sql.y"
+ case 280: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+#line 817 "sql.y"
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -3921,42 +3971,42 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3925 "sql.c"
+#line 3975 "sql.c"
break;
- case 276: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
-#line 818 "sql.y"
+ case 281: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+#line 830 "sql.y"
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
toTSDBType(yymsp[-2].minor.yy0.type);
SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1);
- A = tVariantListAppend(A, &yymsp[0].minor.yy516, -1);
+ A = tVariantListAppend(A, &yymsp[0].minor.yy430, -1);
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3939 "sql.c"
+#line 3989 "sql.c"
break;
- case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
-#line 829 "sql.y"
+ case 282: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+#line 841 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3948 "sql.c"
+#line 3998 "sql.c"
break;
- case 278: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
-#line 836 "sql.y"
+ case 283: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+#line 848 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3957 "sql.c"
+#line 4007 "sql.c"
break;
- case 279: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
-#line 842 "sql.y"
+ case 284: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+#line 854 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3966,28 +4016,28 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3970 "sql.c"
+#line 4020 "sql.c"
break;
- case 280: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
-#line 852 "sql.y"
+ case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+#line 864 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3979 "sql.c"
+#line 4029 "sql.c"
break;
- case 281: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
-#line 859 "sql.y"
+ case 286: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+#line 871 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 3988 "sql.c"
+#line 4038 "sql.c"
break;
- case 282: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
-#line 864 "sql.y"
+ case 287: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+#line 876 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
@@ -3997,10 +4047,10 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 4001 "sql.c"
+#line 4051 "sql.c"
break;
- case 283: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
-#line 874 "sql.y"
+ case 288: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+#line 886 "sql.y"
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
@@ -4013,45 +4063,45 @@ static YYACTIONTYPE yy_reduce(
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 4017 "sql.c"
+#line 4067 "sql.c"
break;
- case 284: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
-#line 887 "sql.y"
+ case 289: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+#line 899 "sql.y"
{
yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n;
toTSDBType(yymsp[-2].minor.yy0.type);
SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1);
- A = tVariantListAppend(A, &yymsp[0].minor.yy516, -1);
+ A = tVariantListAppend(A, &yymsp[0].minor.yy430, -1);
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 4031 "sql.c"
+#line 4081 "sql.c"
break;
- case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
-#line 898 "sql.y"
+ case 290: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+#line 910 "sql.y"
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
-#line 4040 "sql.c"
+#line 4090 "sql.c"
break;
- case 286: /* cmd ::= KILL CONNECTION INTEGER */
-#line 905 "sql.y"
+ case 291: /* cmd ::= KILL CONNECTION INTEGER */
+#line 917 "sql.y"
{setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);}
-#line 4045 "sql.c"
+#line 4095 "sql.c"
break;
- case 287: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
-#line 906 "sql.y"
+ case 292: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */
+#line 918 "sql.y"
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);}
-#line 4050 "sql.c"
+#line 4100 "sql.c"
break;
- case 288: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
-#line 907 "sql.y"
+ case 293: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */
+#line 919 "sql.y"
{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);}
-#line 4055 "sql.c"
+#line 4105 "sql.c"
break;
default:
break;
@@ -4136,7 +4186,7 @@ static void yy_syntax_error(
}
assert(len <= outputBufLen);
-#line 4140 "sql.c"
+#line 4190 "sql.c"
/************ End %syntax_error code ******************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
ParseCTX_STORE
@@ -4163,7 +4213,7 @@ static void yy_accept(
** parser accepts */
/*********** Begin %parse_accept code *****************************************/
#line 61 "sql.y"
-#line 4167 "sql.c"
+#line 4217 "sql.c"
/*********** End %parse_accept code *******************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
ParseCTX_STORE
diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt
index 349d511f1570e3df835494ebd4e3e86d7795c873..8c4b9c2e6a2e9a5f6835baf411ecc94e6889fcbe 100644
--- a/src/query/tests/CMakeLists.txt
+++ b/src/query/tests/CMakeLists.txt
@@ -18,7 +18,7 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR))
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
ADD_EXECUTABLE(queryTest ${SOURCE_LIST})
- TARGET_LINK_LIBRARIES(queryTest taos query gtest pthread)
+ TARGET_LINK_LIBRARIES(queryTest taos cJson query gtest pthread)
ENDIF()
SET_SOURCE_FILES_PROPERTIES(./astTest.cpp PROPERTIES COMPILE_FLAGS -w)
diff --git a/src/query/tests/cSortTest.cpp b/src/query/tests/cSortTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa5aa89afc211678cfc521dccd46fbdb533fbff1
--- /dev/null
+++ b/src/query/tests/cSortTest.cpp
@@ -0,0 +1,130 @@
+#include <gtest/gtest.h>
+#include <iostream>
+
+#include "taos.h"
+#include "tsdb.h"
+#include "qExtbuffer.h"
+
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+
+namespace {
+ int32_t comp(const void* p1, const void* p2) {
+ int32_t* x1 = (int32_t*) p1;
+ int32_t* x2 = (int32_t*) p2;
+
+ if (*x1 == *x2) {
+ return 0;
+ } else {
+ return (*x1 > *x2)? 1:-1;
+ }
+ }
+
+ int32_t comp1(const void* p1, const void* p2) {
+ int32_t ret = strncmp((char*) p1, (char*) p2, 20);
+
+ if (ret == 0) {
+ return 0;
+ } else {
+ return ret > 0 ? 1:-1;
+ }
+ }
+}
+
+TEST(testCase, columnwise_sort_test) {
+ // void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn)
+ void* pCols[2] = {0};
+
+ SSchema s[2] = {{0}};
+ s[0].type = TSDB_DATA_TYPE_INT;
+ s[0].bytes = 4;
+ s[0].colId = 0;
+ strcpy(s[0].name, "col1");
+
+ s[1].type = TSDB_DATA_TYPE_BINARY;
+ s[1].bytes = 20;
+ s[1].colId = 1;
+ strcpy(s[1].name, "col2");
+
+ int32_t* p = (int32_t*) calloc(5, sizeof(int32_t));
+ p[0] = 12;
+ p[1] = 8;
+ p[2] = 99;
+ p[3] = 7;
+ p[4] = 1;
+
+ char* t1 = (char*) calloc(5, 20);
+ strcpy(t1, "abc");
+ strcpy(t1 + 20, "def");
+ strcpy(t1 + 40, "xyz");
+ strcpy(t1 + 60, "klm");
+ strcpy(t1 + 80, "hij");
+
+ pCols[0] = (char*) p;
+ pCols[1] = (char*) t1;
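+
+  // sort by column 0 (the int column); taoscQSort is expected to reorder every
+  // column in pCols in tandem so rows stay aligned, as the asserts below check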
+  taoscQSort(reinterpret_cast<void**>(pCols), s, 2, 5, 0, comp);
+
+ int32_t* px = (int32_t*) pCols[0];
+ ASSERT_EQ(px[0], 1);
+ ASSERT_EQ(px[1], 7);
+ ASSERT_EQ(px[2], 8);
+ ASSERT_EQ(px[3], 12);
+ ASSERT_EQ(px[4], 99);
+
+ char* px1 = (char*) pCols[1];
+ ASSERT_STRCASEEQ(px1 + 20 * 0, "hij");
+ ASSERT_STRCASEEQ(px1 + 20 * 1, "klm");
+ ASSERT_STRCASEEQ(px1 + 20 * 2, "def");
+ ASSERT_STRCASEEQ(px1 + 20 * 3, "abc");
+ ASSERT_STRCASEEQ(px1 + 20 * 4, "xyz");
+
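+  // re-sort by column 1 (the binary column); the int column must follow the
+  // same permutation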
+ taoscQSort(pCols, s, 2, 5, 1, comp1);
+ px = (int32_t*) pCols[0];
+ ASSERT_EQ(px[0], 12);
+ ASSERT_EQ(px[1], 8);
+ ASSERT_EQ(px[2], 1);
+ ASSERT_EQ(px[3], 7);
+ ASSERT_EQ(px[4], 99);
+
+ px1 = (char*) pCols[1];
+ ASSERT_STRCASEEQ(px1 + 20 * 0, "abc");
+ ASSERT_STRCASEEQ(px1 + 20 * 1, "def");
+ ASSERT_STRCASEEQ(px1 + 20 * 2, "hij");
+ ASSERT_STRCASEEQ(px1 + 20 * 3, "klm");
+ ASSERT_STRCASEEQ(px1 + 20 * 4, "xyz");
+}
+
+TEST(testCase, columnsort_test) {
+ SSchema field[1] = {
+ {TSDB_DATA_TYPE_INT, "k", sizeof(int32_t)},
+ };
+
+ const int32_t num = 2000;
+
+ int32_t *d = (int32_t *)malloc(sizeof(int32_t) * num);
+ for (int32_t i = 0; i < num; ++i) {
+ d[i] = i % 4;
+ }
+
+ const int32_t numOfOrderCols = 1;
+ int32_t orderColIdx = 0;
+ SColumnModel *pModel = createColumnModel(field, 1, 1000);
+ tOrderDescriptor *pDesc = tOrderDesCreate(&orderColIdx, numOfOrderCols, pModel, 1);
+
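+  // sort the 2000 ints (values cycling through 0..3) in place via the descriptor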
+ tColDataQSort(pDesc, num, 0, num - 1, (char *)d, 1);
+
+ for (int32_t i = 0; i < num; ++i) {
+ printf("%d\t", d[i]);
+ }
+ printf("\n");
+
+ destroyColumnModel(pModel);
+}
\ No newline at end of file
diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp
index 9f6e219c0aaf6a66c42c45ddcfbb774f0862e74e..1ed4cde40653aaed99031fca81a8719a3f748b6b 100644
--- a/src/query/tests/unitTest.cpp
+++ b/src/query/tests/unitTest.cpp
@@ -1,6 +1,4 @@
-#include "os.h"
 #include <gtest/gtest.h>
-#include <cassert>
 #include <iostream>
 
#include "taos.h"
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index c93a3f929d9510ccd8e092b7d924bc541d608581..9ea5fd539244820f111a3fbb3c60aee088e727c5 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -407,7 +407,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64
if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE
|| type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP
|| type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META
- || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS)
+ || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE)
pContext->connType = RPC_CONN_TCPC;
pContext->rid = taosAddRef(tsRpcRefId, pContext);
@@ -1133,8 +1133,10 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
} else {
// for asynchronous API
SRpcEpSet *pEpSet = NULL;
- //if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
- pEpSet = &pContext->epSet;
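+    // hand the endpoint set back to the caller only when it actually changed
+    // or a redirect happened; otherwise pEpSet stays NULL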
+ if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
+ pEpSet = &pContext->epSet;
(*pRpc->cfp)(pMsg, pEpSet);
}
diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt
index c5b77df5a25f9f0b1e9294228520f171b9befddd..efbed6f0a6e8218c3a0b46d2913f6a792bf48ce4 100644
--- a/src/tsdb/CMakeLists.txt
+++ b/src/tsdb/CMakeLists.txt
@@ -2,6 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(tsdb ${SRC})
TARGET_LINK_LIBRARIES(tsdb tfs common tutil)
diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h
index ec6b057aef142fb938993b3a27717c5e64937258..4b650d3993a54f6a98caf00a3605feb37e972ebd 100644
--- a/src/tsdb/inc/tsdbBuffer.h
+++ b/src/tsdb/inc/tsdbBuffer.h
@@ -29,6 +29,7 @@ typedef struct {
int tBufBlocks;
int nBufBlocks;
int nRecycleBlocks;
+ int nElasticBlocks;
int64_t index;
SList* bufBlockList;
} STsdbBufPool;
@@ -41,6 +42,10 @@ int tsdbOpenBufPool(STsdbRepo* pRepo);
void tsdbCloseBufPool(STsdbRepo* pRepo);
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks);
-void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode);
+void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic);
+
+// exposed (no longer static) for the tsdbHealth module
+STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize);
+void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock);
#endif /* _TD_TSDB_BUFFER_H_ */
diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h
index d63aeb14ac6ca6cd6f59654cf74f11d8e33d6ce4..e89e10f7667e8aa5388ebfa4d2c5b54f1bf3e57f 100644
--- a/src/tsdb/inc/tsdbFS.h
+++ b/src/tsdb/inc/tsdbFS.h
@@ -18,6 +18,9 @@
#define TSDB_FS_VERSION 0
+// ================== TSDB global config
+extern bool tsdbForceKeepFile;
+
// ================== CURRENT file header info
typedef struct {
uint32_t version; // Current file system version (relating to code)
@@ -42,8 +45,9 @@ typedef struct {
typedef struct {
pthread_rwlock_t lock;
- SFSStatus* cstatus; // current status
- SHashObj* metaCache; // meta cache
+ SFSStatus* cstatus; // current status
+ SHashObj* metaCache; // meta cache
+ SHashObj* metaCacheComp; // meta cache for compact
bool intxn;
SFSStatus* nstatus; // new status
} STsdbFS;
@@ -109,4 +113,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) {
return 0;
}
-#endif /* _TD_TSDB_FS_H_ */
\ No newline at end of file
+#endif /* _TD_TSDB_FS_H_ */
diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h
new file mode 100644
index 0000000000000000000000000000000000000000..324f4312e05fc0ca0200c319728bf692bf476bf6
--- /dev/null
+++ b/src/tsdb/inc/tsdbHealth.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TD_TSDB_HEALTH_H_
+#define _TD_TSDB_HEALTH_H_
+
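+// helpers for relieving buffer pool exhaustion: grow the pool with elastic
+// blocks, or urge long-running queries to release memory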
+bool tsdbUrgeQueryFree(STsdbRepo* pRepo);
+int32_t tsdbInsertNewBlock(STsdbRepo* pRepo);
+
+bool tsdbIdleMemEnough();
+bool tsdbAllowNewBlock(STsdbRepo* pRepo);
+
+#endif /* _TD_TSDB_HEALTH_H_ */
diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h
index 51801c843c279f10e9e0895a0f2dee2839a3f6a2..8ce5e7ade80b2006ac8c39fec178994073c5a26d 100644
--- a/src/tsdb/inc/tsdbMeta.h
+++ b/src/tsdb/inc/tsdbMeta.h
@@ -100,7 +100,7 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) {
- STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
+  STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purposes
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h
index 532907ae01be576e40feea2969761846f07170b3..80e92975799f47d68ff72ef80a52efb6fe901b5e 100644
--- a/src/tsdb/inc/tsdbint.h
+++ b/src/tsdb/inc/tsdbint.h
@@ -97,6 +97,7 @@ struct STsdbRepo {
SMergeBuf mergeBuf; //used when update=2
int8_t compactState; // compact state: inCompact/noCompact/waitingCompact?
+  pthread_t* pthread;            // worker thread used by tsdbHealth to urge queries to free memory
};
#define REPO_ID(r) (r)->config.tsdbId
diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c
index e675bf6f9de04021112d43a1db70cf56cf430f08..70589031f6516a129a5a683b0e76edb23b814e15 100644
--- a/src/tsdb/src/tsdbBuffer.c
+++ b/src/tsdb/src/tsdbBuffer.c
@@ -14,12 +14,10 @@
*/
#include "tsdbint.h"
+#include "tsdbHealth.h"
#define POOL_IS_EMPTY(b) (listNEles((b)->bufBlockList) == 0)
-static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize);
-static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock);
-
// ---------------- INTERNAL FUNCTIONS ----------------
STsdbBufPool *tsdbNewBufPool() {
STsdbBufPool *pBufPool = (STsdbBufPool *)calloc(1, sizeof(*pBufPool));
@@ -65,10 +63,10 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) {
STsdbBufPool *pPool = pRepo->pPool;
ASSERT(pPool != NULL);
-
pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB
pPool->tBufBlocks = pCfg->totalBlocks;
pPool->nBufBlocks = 0;
+ pPool->nElasticBlocks = 0;
pPool->index = 0;
pPool->nRecycleBlocks = 0;
@@ -120,6 +118,21 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) {
STsdbBufPool *pBufPool = pRepo->pPool;
while (POOL_IS_EMPTY(pBufPool)) {
+ if(tsDeadLockKillQuery) {
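+      // the pool is exhausted: rather than just blocking on poolNotEmpty (and
+      // possibly deadlocking against long-running queries), try to grow the
+      // pool first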
+      // try to supply a new elastic block
+ if(tsdbInsertNewBlock(pRepo) > 0) {
+        tsdbWarn("vgId:%d added a new elastic block. nElasticBlocks=%d, free blocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->bufBlockList->numOfEles);
+ break;
+ } else {
+        // no new block allowed; ask running queries to release memory instead
+        if(!tsdbUrgeQueryFree(pRepo))
+          tsdbWarn("vgId:%d failed to start the urge-query-free thread.", REPO_ID(pRepo));
+ }
+ }
+
pRepo->repoLocked = false;
pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex));
pRepo->repoLocked = true;
@@ -139,11 +149,11 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) {
}
// ---------------- LOCAL FUNCTIONS ----------------
-static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) {
+STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) {
STsdbBufBlock *pBufBlock = (STsdbBufBlock *)malloc(sizeof(*pBufBlock) + bufBlockSize);
if (pBufBlock == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
- goto _err;
+ return NULL;
}
pBufBlock->blockId = 0;
@@ -151,13 +161,9 @@ static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) {
pBufBlock->remain = bufBlockSize;
return pBufBlock;
-
-_err:
- tsdbFreeBufBlock(pBufBlock);
- return NULL;
}
-static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); }
+void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); }
int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) {
if (oldTotalBlocks == pRepo->config.totalBlocks) {
@@ -193,10 +199,16 @@ err:
return err;
}
-void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) {
+void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic) {
STsdbBufBlock *pBufBlock = NULL;
tdListNodeGetData(pPool->bufBlockList, pNode, (void *)(&pBufBlock));
tsdbFreeBufBlock(pBufBlock);
free(pNode);
- pPool->nBufBlocks--;
-}
+  if (bELastic) {
+    pPool->nElasticBlocks--;
+    tsdbWarn("pPool=%p released one elastic block. nElasticBlocks=%d, free blocks=%d", pPool,
+             pPool->nElasticBlocks, pPool->bufBlockList->numOfEles);
+  } else {
+    pPool->nBufBlocks--;
+  }
+}
\ No newline at end of file
diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c
index 8f5f885d692f723bc1690a708571093bbe0a2717..03110487807076bf8ac2ac7026ffdb828ea4c7c6 100644
--- a/src/tsdb/src/tsdbCommit.c
+++ b/src/tsdb/src/tsdbCommit.c
@@ -14,6 +14,8 @@
*/
#include "tsdbint.h"
+extern int32_t tsTsdbMetaCompactRatio;
+
#define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
@@ -55,8 +57,9 @@ typedef struct {
#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))
static int tsdbCommitMeta(STsdbRepo *pRepo);
-static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen);
+static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact);
static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid);
+static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile);
static int tsdbCommitTSData(STsdbRepo *pRepo);
static void tsdbStartCommit(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
@@ -261,6 +264,37 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {
// =================== Commit Meta Data
+static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
+ STsdbFS * pfs = REPO_FS(pRepo);
+ SMFile * pOMFile = pfs->cstatus->pmf;
+ SDiskID did;
+
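+  // when 'open' is false, only the SMFile descriptor is initialized; the
+  // caller (the meta-compact path) opens the file itself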
+ // Create/Open a meta file or open the existing file
+ if (pOMFile == NULL) {
+ // Create a new meta file
+ did.level = TFS_PRIMARY_LEVEL;
+ did.id = TFS_PRIMARY_ID;
+ tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
+
+ if (open && tsdbCreateMFile(pMf, true) < 0) {
+ tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
+ return -1;
+ }
+
+ tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf));
+ } else {
+ tsdbInitMFileEx(pMf, pOMFile);
+ if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) {
+ tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int tsdbCommitMeta(STsdbRepo *pRepo) {
STsdbFS * pfs = REPO_FS(pRepo);
SMemTable *pMem = pRepo->imem;
@@ -269,34 +301,25 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
SActObj * pAct = NULL;
SActCont * pCont = NULL;
SListNode *pNode = NULL;
- SDiskID did;
ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0);
if (listNEles(pMem->actList) <= 0) {
// no meta data to commit, just keep the old meta file
tsdbUpdateMFile(pfs, pOMFile);
- return 0;
- } else {
- // Create/Open a meta file or open the existing file
- if (pOMFile == NULL) {
- // Create a new meta file
- did.level = TFS_PRIMARY_LEVEL;
- did.id = TFS_PRIMARY_ID;
- tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
-
- if (tsdbCreateMFile(&mf, true) < 0) {
- tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
+ if (tsTsdbMetaCompactRatio > 0) {
+ if (tsdbInitCommitMetaFile(pRepo, &mf, false) < 0) {
return -1;
}
+ int ret = tsdbCompactMetaFile(pRepo, pfs, &mf);
+      if (ret < 0) tsdbError("failed to compact meta file");
- tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));
- } else {
- tsdbInitMFileEx(&mf, pOMFile);
- if (tsdbOpenMFile(&mf, O_WRONLY) < 0) {
- tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
- return -1;
- }
+ return ret;
+ }
+ return 0;
+ } else {
+ if (tsdbInitCommitMetaFile(pRepo, &mf, true) < 0) {
+ return -1;
}
}
@@ -305,7 +328,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
pAct = (SActObj *)pNode->data;
if (pAct->act == TSDB_UPDATE_META) {
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
- if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
+ if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, false) < 0) {
tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tsdbCloseMFile(&mf);
@@ -338,6 +361,10 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
tsdbCloseMFile(&mf);
tsdbUpdateMFile(pfs, &mf);
+ if (tsTsdbMetaCompactRatio > 0 && tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) {
+    tsdbError("failed to compact meta file");
+ }
+
return 0;
}
@@ -375,7 +402,7 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
pRtn->minFid, pRtn->midFid, pRtn->maxFid);
}
-static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) {
+static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact) {
char buf[64] = "\0";
void * pBuf = buf;
SKVRecord rInfo;
@@ -401,13 +428,20 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void
}
tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)));
- SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid));
+
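+  // while compacting, records are tracked in the side cache (metaCacheComp) so
+  // the live metaCache stays valid until the compacted file is swapped in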
+ SHashObj* cache = compact ? pfs->metaCacheComp : pfs->metaCache;
+
+ pMFile->info.nRecords++;
+
+ SKVRecord *pRecord = taosHashGet(cache, (void *)&uid, sizeof(uid));
if (pRecord != NULL) {
pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord));
} else {
pMFile->info.nRecords++;
}
- taosHashPut(pfs->metaCache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
+ taosHashPut(cache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
return 0;
}
@@ -442,6 +474,135 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
return 0;
}
+static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) {
+ float delPercent = (float)(pMFile->info.nDels) / (float)(pMFile->info.nRecords);
+ float tombPercent = (float)(pMFile->info.tombSize) / (float)(pMFile->info.size);
+ float compactRatio = (float)(tsTsdbMetaCompactRatio)/100;
+
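+  // compact only when deleted records or tombstone bytes exceed the configured
+  // ratio of the meta file; below that a rewrite costs more than it saves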
+ if (delPercent < compactRatio && tombPercent < compactRatio) {
+ return 0;
+ }
+
+ if (tsdbOpenMFile(pMFile, O_RDONLY) < 0) {
+    tsdbError("failed to open meta file %s for compaction", pMFile->f.rname);
+ return -1;
+ }
+
+  tsdbInfo("begin compact tsdb meta file, ratio:%d, nDels:%" PRId64 ", nRecords:%" PRId64 ", tombSize:%" PRId64 ", size:%" PRId64,
+           tsTsdbMetaCompactRatio, pMFile->info.nDels, pMFile->info.nRecords, pMFile->info.tombSize, pMFile->info.size);
+
+ SMFile mf;
+ SDiskID did;
+
+ // first create tmp meta file
+ did.level = TFS_PRIMARY_LEVEL;
+ did.id = TFS_PRIMARY_ID;
+ tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)) + 1);
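+  // txn version + 1 gives the tmp file a name distinct from the current meta
+  // file; on success it is renamed over the old one below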
+
+ if (tsdbCreateMFile(&mf, true) < 0) {
+ tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
+ return -1;
+ }
+
+ tsdbInfo("vgId:%d meta file %s is created to compact meta data", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));
+
+ // second iterator metaCache
+ int code = -1;
+ int64_t maxBufSize = 1024;
+ SKVRecord *pRecord;
+ void *pBuf = NULL;
+
+ pBuf = malloc((size_t)maxBufSize);
+ if (pBuf == NULL) {
+ goto _err;
+ }
+
+ // init Comp
+ assert(pfs->metaCacheComp == NULL);
+ pfs->metaCacheComp = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ if (pfs->metaCacheComp == NULL) {
+ goto _err;
+ }
+
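+  // walk every live record tracked in metaCache and copy its payload from the
+  // old meta file into the tmp file; dropped records are simply never copied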
+ pRecord = taosHashIterate(pfs->metaCache, NULL);
+ while (pRecord) {
+ if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) {
+ tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
+ tstrerror(terrno));
+ goto _err;
+ }
+ if (pRecord->size > maxBufSize) {
+ maxBufSize = pRecord->size;
+ void* tmp = realloc(pBuf, (size_t)maxBufSize);
+ if (tmp == NULL) {
+ goto _err;
+ }
+ pBuf = tmp;
+ }
+ int nread = (int)tsdbReadMFile(pMFile, pBuf, pRecord->size);
+ if (nread < 0) {
+ tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
+ tstrerror(terrno));
+ goto _err;
+ }
+
+ if (nread < pRecord->size) {
+ tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d",
+ REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread);
+ goto _err;
+ }
+
+ if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, true) < 0) {
+ tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid,
+ tstrerror(terrno));
+ goto _err;
+ }
+
+ pRecord = taosHashIterate(pfs->metaCache, pRecord);
+ }
+ code = 0;
+
+_err:
+ if (code == 0) TSDB_FILE_FSYNC(&mf);
+ tsdbCloseMFile(&mf);
+ tsdbCloseMFile(pMFile);
+
+ if (code == 0) {
+ // rename meta.tmp -> meta
+ tsdbInfo("vgId:%d meta file rename %s -> %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf), TSDB_FILE_FULL_NAME(pMFile));
+ taosRename(mf.f.aname,pMFile->f.aname);
+ tstrncpy(mf.f.aname, pMFile->f.aname, TSDB_FILENAME_LEN);
+ tstrncpy(mf.f.rname, pMFile->f.rname, TSDB_FILENAME_LEN);
+ // update current meta file info
+ pfs->nstatus->pmf = NULL;
+ tsdbUpdateMFile(pfs, &mf);
+
+ taosHashCleanup(pfs->metaCache);
+ pfs->metaCache = pfs->metaCacheComp;
+ pfs->metaCacheComp = NULL;
+ } else {
+ // remove meta.tmp file
+ remove(mf.f.aname);
+ taosHashCleanup(pfs->metaCacheComp);
+ pfs->metaCacheComp = NULL;
+ }
+
+ tfree(pBuf);
+
+ ASSERT(mf.info.nDels == 0);
+ ASSERT(mf.info.tombSize == 0);
+
+ tsdbInfo("end compact tsdb meta file,code:%d,nRecords:%" PRId64 ",size:%" PRId64,
+ code,mf.info.nRecords,mf.info.size);
+ return code;
+}
+
// =================== Commit Time-Series Data
static int tsdbCommitTSData(STsdbRepo *pRepo) {
SMemTable *pMem = pRepo->imem;
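
The compaction trigger at the top of tsdbCompactMetaFile fires when either the deletion ratio (nDels/nRecords) or the tombstone ratio (tombSize/size) reaches tsTsdbMetaCompactRatio percent. A minimal standalone sketch of that check with illustrative field values (SMetaInfo and shouldCompact are hypothetical stand-ins, not names from the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the SMFile info fields used above. */
    typedef struct {
      int64_t nDels;     /* delete records written since creation */
      int64_t nRecords;  /* records currently tracked in the file */
      int64_t tombSize;  /* bytes held by superseded records      */
      int64_t size;      /* total meta file size in bytes         */
    } SMetaInfo;

    static bool shouldCompact(const SMetaInfo *info, int32_t ratioPercent) {
      float delPercent  = (float)info->nDels    / (float)info->nRecords;
      float tombPercent = (float)info->tombSize / (float)info->size;
      float threshold   = (float)ratioPercent / 100.0f;
      /* mirrors: if (delPercent < ratio && tombPercent < ratio) return 0; */
      return delPercent >= threshold || tombPercent >= threshold;
    }

    int main(void) {
      SMetaInfo info = {.nDels = 35, .nRecords = 100, .tombSize = 4096, .size = 65536};
      printf("compact: %d\n", shouldCompact(&info, 30)); /* 1: 35% deletions >= 30% */
      return 0;
    }
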
@@ -1263,13 +1418,11 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
while (true) {
key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter);
- bool isRowDel = false;
SMemRow row = tsdbNextIterRow(pCommitIter->pIter);
if (row == NULL || memRowKey(row) > maxKey) {
key2 = INT64_MAX;
} else {
key2 = memRowKey(row);
- isRowDel = memRowDeleted(row);
}
if (key1 == INT64_MAX && key2 == INT64_MAX) break;
@@ -1284,36 +1437,33 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
pTarget->numOfRows++;
(*iter)++;
} else if (key1 > key2) {
- if (!isRowDel) {
- if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
- pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
- ASSERT(pSchema != NULL);
- }
-
- tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
+ if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
+ pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
+ ASSERT(pSchema != NULL);
}
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
+
tSkipListIterNext(pCommitIter->pIter);
} else {
- if (update) {
- if (!isRowDel) {
- if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
- pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
- ASSERT(pSchema != NULL);
- }
-
- tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
- }
- } else {
- ASSERT(!isRowDel);
-
+ if (update != TD_ROW_OVERWRITE_UPDATE) {
+ // copy disk data
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints);
}
- pTarget->numOfRows++;
+ if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++;
+ }
+ if (update != TD_ROW_DISCARD_UPDATE) {
+ // copy mem data
+ if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) {
+ pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row));
+ ASSERT(pSchema != NULL);
+ }
+
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
}
(*iter)++;
tSkipListIterNext(pCommitIter->pIter);
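
With isRowDel gone, the equal-key branch above now encodes three update policies through two guards. A condensed, runnable sketch of that decision logic (only the mode names come from the patch; the enum values and the two stub helpers are assumptions for illustration):

    #include <stdio.h>

    /* Mode names are from the patch; the numeric values are assumptions. */
    enum { TD_ROW_DISCARD_UPDATE = 0, TD_ROW_OVERWRITE_UPDATE = 1, TD_ROW_PARTIAL_UPDATE = 2 };

    static void copyDiskColumns(void)      { printf("  copy disk columns\n"); }
    static void appendMemRow(int forceAll) { printf("  append mem row (forceAll=%d)\n", forceAll); }

    static void mergeEqualKey(int update) {
      if (update != TD_ROW_OVERWRITE_UPDATE) {
        /* DISCARD keeps the disk row as the final row;
         * PARTIAL keeps it as the base to be patched by the mem row */
        copyDiskColumns();
      }
      if (update != TD_ROW_DISCARD_UPDATE) {
        /* OVERWRITE replaces the row outright; PARTIAL overlays mem values */
        appendMemRow(update == TD_ROW_OVERWRITE_UPDATE);
      }
    }

    int main(void) {
      for (int m = 0; m <= 2; ++m) { printf("mode %d:\n", m); mergeEqualKey(m); }
      return 0;
    }
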
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index 68450301d8f0c8536327e593d87030920f27ff49..a40e67ca590082dcb7925ab167d7d2c5165f8017 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -38,7 +38,6 @@ static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo);
// For backward compatibility
-bool tsdbForceKeepFile = false;
// ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0;
@@ -217,6 +216,7 @@ STsdbFS *tsdbNewFS(STsdbCfg *pCfg) {
}
pfs->intxn = false;
+ pfs->metaCacheComp = NULL;
pfs->nstatus = tsdbNewFSStatus(maxFSet);
if (pfs->nstatus == NULL) {
@@ -1354,4 +1354,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) {
tsdbCloseDFileSet(&fset);
}
-}
\ No newline at end of file
+}
diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c
index 50fa393e9fc9060941e181f8043d84070d036f98..0f13b6108f6558ab7948df01e38b0c3fd0d2cd9a 100644
--- a/src/tsdb/src/tsdbFile.c
+++ b/src/tsdb/src/tsdbFile.c
@@ -16,11 +16,11 @@
#include "tsdbint.h"
static const char *TSDB_FNAME_SUFFIX[] = {
- "head", // TSDB_FILE_HEAD
- "data", // TSDB_FILE_DATA
- "last", // TSDB_FILE_LAST
- "", // TSDB_FILE_MAX
- "meta" // TSDB_FILE_META
+ "head", // TSDB_FILE_HEAD
+ "data", // TSDB_FILE_DATA
+ "last", // TSDB_FILE_LAST
+ "", // TSDB_FILE_MAX
+ "meta", // TSDB_FILE_META
};
static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname);
diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c
new file mode 100644
index 0000000000000000000000000000000000000000..8198c480334912b1ce373ceca7b82409f5a644f2
--- /dev/null
+++ b/src/tsdb/src/tsdbHealth.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "os.h"
+#include "taosmsg.h"
+#include "tarray.h"
+#include "query.h"
+#include "tglobal.h"
+#include "tlist.h"
+#include "tsdbint.h"
+#include "tsdbBuffer.h"
+#include "tsdbLog.h"
+#include "tsdbHealth.h"
+#include "ttimer.h"
+#include "tthread.h"
+
+
+// returns the number of newly allocated blocks
+int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) {
+ STsdbBufPool *pPool = pRepo->pPool;
+ int32_t cnt = 0;
+
+ if(tsdbAllowNewBlock(pRepo)) {
+ STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize);
+ if (pBufBlock) {
+ if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) {
+ // append error
+ tsdbFreeBufBlock(pBufBlock);
+ } else {
+ pPool->nElasticBlocks ++;
+ cnt ++ ;
+ }
+ }
+ }
+ return cnt;
+}
+
+// callback that runs on another thread
+void* cbKillQueryFree(void* param) {
+ STsdbRepo* pRepo = (STsdbRepo*)param;
+ // vnode
+ if(pRepo->appH.notifyStatus) {
+ pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS);
+ }
+
+ // free
+ if(pRepo->pthread){
+ void* p = pRepo->pthread;
+ pRepo->pthread = NULL;
+ free(p);
+ }
+
+ return NULL;
+}
+
+// returns true if a free was triggered, false if nothing was done
+bool tsdbUrgeQueryFree(STsdbRepo * pRepo) {
+ // check previous running
+ if(pRepo->pthread && taosThreadRunning(pRepo->pthread)) {
+ tsdbWarn("vgId:%d pre urge thread is runing. nBlocks=%d nElasticBlocks=%d", REPO_ID(pRepo), pRepo->pPool->nBufBlocks, pRepo->pPool->nElasticBlocks);
+ return false;
+ }
+ // create new
+ pRepo->pthread = taosCreateThread(cbKillQueryFree, pRepo);
+ if(pRepo->pthread == NULL) {
+ tsdbError("vgId:%d create urge thread error.", REPO_ID(pRepo));
+ return false;
+ }
+ return true;
+}
+
+bool tsdbAllowNewBlock(STsdbRepo* pRepo) {
+ int32_t nMaxElastic = pRepo->config.totalBlocks/3;
+ STsdbBufPool* pPool = pRepo->pPool;
+ if(pPool->nElasticBlocks >= nMaxElastic) {
+ tsdbWarn("vgId:%d tsdbAllowNewBlock return fasle. nElasticBlock(%d) >= MaxElasticBlocks(%d)", REPO_ID(pRepo), pPool->nElasticBlocks, nMaxElastic);
+ return false;
+ }
+ return true;
+}
+
+bool tsdbNoProblem(STsdbRepo* pRepo) {
+ if(listNEles(pRepo->pPool->bufBlockList) == 0)
+ return false;
+ return true;
+}
\ No newline at end of file
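
Taken together, the three helpers above implement a pressure-relief path for the buffer pool: grow it with a bounded number of "elastic" blocks, and once the cap (totalBlocks/3) is hit, spawn a thread that asks the vnode to kill queries so memory can be reclaimed. A hedged sketch of a caller, written against the patch's APIs (the driver function itself is illustrative and not compilable in isolation):

    /* Illustrative composition only; error handling elided. */
    static int tsdbRelieveBufferPressure(STsdbRepo *pRepo) {
      if (tsdbNoProblem(pRepo)) {
        return 0;                        /* pool still has free blocks      */
      }
      if (tsdbInsertNewBlock(pRepo) > 0) {
        return 0;                        /* grew pool with an elastic block */
      }
      /* elastic cap reached: free memory asynchronously via query kill */
      return tsdbUrgeQueryFree(pRepo) ? 0 : -1;
    }
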
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index b2e6fe89161d0e9bceaf74a46807f51ec402fb2a..c2021963e0d0c8be4ed42588549153dcd20be63c 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -16,6 +16,8 @@
// no test file errors here
#include "taosdef.h"
#include "tsdbint.h"
+#include "ttimer.h"
+#include "tthread.h"
#define IS_VALID_PRECISION(precision) \
(((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO))
@@ -126,6 +128,10 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) {
terrno = TSDB_CODE_SUCCESS;
tsdbStopStream(pRepo);
+ if(pRepo->pthread){
+ taosDestoryThread(pRepo->pthread);
+ pRepo->pthread = NULL;
+ }
if (toCommit) {
tsdbSyncCommit(repo);
@@ -547,6 +553,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
pRepo->appH = *pAppH;
}
pRepo->repoLocked = false;
+ pRepo->pthread = NULL;
int code = pthread_mutex_init(&(pRepo->mutex), NULL);
if (code != 0) {
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index e766d97a97a5905db87691426d282a219eef9d68..3890dca5b96c26009dcf3ca72205ca4b1725aa29 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -99,17 +99,22 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
STsdbBufPool *pBufPool = pRepo->pPool;
SListNode *pNode = NULL;
- bool recycleBlocks = pBufPool->nRecycleBlocks > 0;
+ bool addNew = false;
if (tsdbLockRepo(pRepo) < 0) return -1;
while ((pNode = tdListPopHead(pMemTable->bufBlockList)) != NULL) {
if (pBufPool->nRecycleBlocks > 0) {
- tsdbRecycleBufferBlock(pBufPool, pNode);
+ tsdbRecycleBufferBlock(pBufPool, pNode, false);
pBufPool->nRecycleBlocks -= 1;
} else {
- tdListAppendNode(pBufPool->bufBlockList, pNode);
+ if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 2) {
+ tsdbRecycleBufferBlock(pBufPool, pNode, true);
+ } else {
+ tdListAppendNode(pBufPool->bufBlockList, pNode);
+ addNew = true;
+ }
}
}
- if (!recycleBlocks) {
+ if (addNew) {
int code = pthread_cond_signal(&pBufPool->poolNotEmpty);
if (code != 0) {
if (tsdbUnlockRepo(pRepo) < 0) return -1;
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 96e86a6d99ce05624d72a557f112fa1aa0919e1f..a311868de6f7254d776f08a4f4a247293609aef5 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -43,6 +43,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
+static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable);
static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
static void tsdbFreeTableSchema(STable *pTable);
@@ -128,21 +129,16 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
tsdbUnlockRepoMeta(pRepo);
// Write to memtable action
- // TODO: refactor duplicate codes
- int tlen = 0;
- void *pBuf = NULL;
if (newSuper || superChanged) {
- tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super);
- pBuf = tsdbAllocBytes(pRepo, tlen);
- if (pBuf == NULL) goto _err;
- void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, super);
- ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
+ // add insert new super table action
+ if (tsdbInsertNewTableAction(pRepo, super) != 0) {
+ goto _err;
+ }
+ }
+ // add insert new table action
+ if (tsdbInsertNewTableAction(pRepo, table) != 0) {
+ goto _err;
}
- tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table);
- pBuf = tsdbAllocBytes(pRepo, tlen);
- if (pBuf == NULL) goto _err;
- void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, table);
- ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
if (tsdbCheckCommit(pRepo) < 0) return -1;
@@ -383,7 +379,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
tdDestroyTSchemaBuilder(&schemaBuilder);
}
- // Chage in memory
+ // Change in memory
if (pNewSchema != NULL) { // change super table tag schema
TSDB_WLOCK_TABLE(pTable->pSuper);
STSchema *pOldSchema = pTable->pSuper->tagSchema;
@@ -426,6 +422,21 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
}
// ------------------ INTERNAL FUNCTIONS ------------------
+static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable) {
+ int tlen = 0;
+ void *pBuf = NULL;
+
+ tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
+ pBuf = tsdbAllocBytes(pRepo, tlen);
+ if (pBuf == NULL) {
+ return -1;
+ }
+ void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, pTable);
+ ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
+
+ return 0;
+}
+
STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
STsdbMeta *pMeta = (STsdbMeta *)calloc(1, sizeof(*pMeta));
if (pMeta == NULL) {
@@ -617,6 +628,7 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
if (pTable->lastCols == NULL) {
return -1;
}
+ // TODO: use binary search instead
for (int16_t i = 0; i < pTable->maxColNum; ++i) {
if (pTable->lastCols[i].colId == colId) {
return i;
@@ -734,10 +746,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
TSDB_WUNLOCK_TABLE(pCTable);
if (insertAct) {
- int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable);
- void *buf = tsdbAllocBytes(pRepo, tlen);
- ASSERT(buf != NULL);
- tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pCTable);
+ if (tsdbInsertNewTableAction(pRepo, pCTable) != 0) {
+ tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " tsdbInsertNewTableAction fail", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
+ TABLE_TID(pTable), TABLE_UID(pTable));
+ }
}
}
@@ -1250,8 +1262,14 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
tlen += tdEncodeKVRow(buf, pTable->tagVal);
} else {
- tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema));
- for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) {
+ uint32_t arraySize = (uint32_t)taosArrayGetSize(pTable->schema);
+ if(arraySize > UINT8_MAX) {
+ tlen += taosEncodeFixedU8(buf, 0);
+ tlen += taosEncodeFixedU32(buf, arraySize);
+ } else {
+ tlen += taosEncodeFixedU8(buf, (uint8_t)arraySize);
+ }
+ for (uint32_t i = 0; i < arraySize; i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tlen += tdEncodeSchema(buf, pSchema);
}
@@ -1284,8 +1302,11 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
buf = tdDecodeKVRow(buf, &(pTable->tagVal));
} else {
- uint8_t nSchemas;
- buf = taosDecodeFixedU8(buf, &nSchemas);
+ uint32_t nSchemas = 0;
+ buf = taosDecodeFixedU8(buf, (uint8_t *)&nSchemas);
+ if(nSchemas == 0) {
+ buf = taosDecodeFixedU32(buf, &nSchemas);
+ }
for (int i = 0; i < nSchemas; i++) {
STSchema *pSchema;
buf = tdDecodeSchema(buf, &pSchema);
@@ -1485,4 +1506,4 @@ static void tsdbFreeTableSchema(STable *pTable) {
taosArrayDestroy(pTable->schema);
}
-}
\ No newline at end of file
+}
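
The tsdbEncodeTable/tsdbDecodeTable change above lifts the schema count past 255 while staying wire-compatible: counts that fit in one byte are written as before, and larger counts are escaped with a 0 byte followed by a u32. A self-contained sketch of the scheme, assuming a zero count never occurs in legacy data (encU8/encU32 are simplified stand-ins for taosEncodeFixedU8/U32):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t* encU8(uint8_t* p, uint8_t v)   { *p = v; return p + 1; }
    static uint8_t* encU32(uint8_t* p, uint32_t v) { memcpy(p, &v, sizeof(v)); return p + 4; }

    /* Counts <= 255 keep the old one-byte layout; larger counts write the
     * 0 escape byte, then the real count as a u32. */
    static uint8_t* encodeCount(uint8_t* p, uint32_t n) {
      if (n > UINT8_MAX) {
        p = encU8(p, 0);
        p = encU32(p, n);
      } else {
        p = encU8(p, (uint8_t)n);
      }
      return p;
    }

    static const uint8_t* decodeCount(const uint8_t* p, uint32_t* n) {
      uint8_t small = *p++;
      if (small == 0) { memcpy(n, p, sizeof(*n)); p += 4; } else { *n = small; }
      return p;
    }

    int main(void) {
      uint8_t buf[8]; uint32_t n = 0;
      encodeCount(buf, 300);
      decodeCount(buf, &n);
      printf("%u\n", n);   /* prints 300 */
      return 0;
    }
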
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index e1d40aa7d046c4fce79d76bcbea36ee3f635163a..4aab9dff7debc0b0f193e38d77222f1752196c65 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -25,6 +25,7 @@
#include "tlosertree.h"
#include "tsdbint.h"
#include "texpr.h"
+#include "qFilter.h"
#define EXTRA_BYTES 2
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
@@ -167,6 +168,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
static void* doFreeColumnInfoData(SArray* pColumnInfoData);
static void* destroyTableCheckInfo(SArray* pTableCheckInfo);
static bool tsdbGetExternalRow(TsdbQueryHandleT pHandle);
+static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
pBlockLoadInfo->slot = -1;
@@ -288,8 +290,6 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j);
STableCheckInfo info = { .lastKey = pKeyInfo->lastKey, .pTableObj = pKeyInfo->pTable };
- info.tableId = ((STable*)(pKeyInfo->pTable))->tableId;
-
assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE ||
info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE));
@@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) {
STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
+ if (pNew->numOfTables == 0) {
+ tsdbDebug("update query time range to invalidate time window");
+
+ assert(taosArrayGetSize(pNew->pGroupList) == 0);
+ bool asc = ASCENDING_TRAVERSE(pCond->order);
+ if (asc) {
+ pCond->twindow.ekey = pCond->twindow.skey - 1;
+ } else {
+ pCond->twindow.skey = pCond->twindow.ekey - 1;
+ }
+ }
+
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef);
pQueryHandle->loadExternalRow = true;
pQueryHandle->currentLoadExternalRows = true;
@@ -1560,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int32_t numOfColsOfRow1 = 0;
if (pSchema1 == NULL) {
- pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1));
+ pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
}
if(isRow1DataRow) {
numOfColsOfRow1 = schemaNCols(pSchema1);
@@ -1572,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
if(row2) {
isRow2DataRow = isDataRow(row2);
if (pSchema2 == NULL) {
- pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2));
+ pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
}
if(isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2);
@@ -2206,7 +2218,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
SBlock* pBlock = pTableCheck->pCompInfo->blocks;
sup.numOfBlocksPerTable[numOfQualTables] = pTableCheck->numOfBlocks;
- char* buf = calloc(1, sizeof(STableBlockInfo) * pTableCheck->numOfBlocks);
+ char* buf = malloc(sizeof(STableBlockInfo) * pTableCheck->numOfBlocks);
if (buf == NULL) {
cleanBlockOrderSupporter(&sup, numOfQualTables);
return TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -2448,7 +2460,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
// current file are not overlapped with query time window, ignore remain files
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
- (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
+ (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle,
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
@@ -2678,21 +2690,6 @@ static int32_t getAllTableList(STable* pSuperTable, SArray* list) {
return TSDB_CODE_SUCCESS;
}
-static void destroyHelper(void* param) {
- if (param == NULL) {
- return;
- }
-
- tQueryInfo* pInfo = (tQueryInfo*)param;
- if (pInfo->optr != TSDB_RELATION_IN) {
- tfree(pInfo->q);
- } else {
- taosHashCleanup((SHashObj *)(pInfo->q));
- }
-
- free(param);
-}
-
static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
if (pQueryHandle->checkFiles) {
// check if the query range overlaps with the file data block
@@ -3462,18 +3459,19 @@ void filterPrepare(void* expr, void* param) {
if (pInfo->optr == TSDB_RELATION_IN) {
int dummy = -1;
- SHashObj *pObj = NULL;
+ SHashObj *pObj = NULL;
if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false);
SArray *arr = (SArray *)(pCond->arr);
for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
char* p = taosArrayGetP(arr, i);
- taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy));
+ strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p));
+ taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy));
}
} else {
buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen);
}
- pInfo->q = (char *)pObj;
+ pInfo->q = (char *)pObj;
} else if (pCond != NULL) {
uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE;
if (size < (uint32_t)pSchema->bytes) {
@@ -3605,8 +3603,6 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
for(int32_t i = 0; i < size; ++i) {
STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i);
- assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE);
-
tsdbRefTable(pKeyInfo->pTable);
STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey};
@@ -3628,103 +3624,8 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
return pTableGroup;
}
-static bool tableFilterFp(const void* pNode, void* param) {
- tQueryInfo* pInfo = (tQueryInfo*) param;
-
- STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
-
- char* val = NULL;
- if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
- val = (char*) TABLE_NAME(pTable);
- } else {
- val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
- }
-
- if (pInfo->optr == TSDB_RELATION_ISNULL || pInfo->optr == TSDB_RELATION_NOTNULL) {
- if (pInfo->optr == TSDB_RELATION_ISNULL) {
- return (val == NULL) || isNull(val, pInfo->sch.type);
- } else if (pInfo->optr == TSDB_RELATION_NOTNULL) {
- return (val != NULL) && (!isNull(val, pInfo->sch.type));
- }
- } else if (pInfo->optr == TSDB_RELATION_IN) {
- int type = pInfo->sch.type;
- if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) {
- int64_t v;
- GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val);
- return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
- } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
- uint64_t v;
- GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val);
- return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
- }
- else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
- double v;
- GET_TYPED_DATA(v, double, pInfo->sch.type, val);
- return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v));
- } else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR){
- return NULL != taosHashGet((SHashObj *)pInfo->q, varDataVal(val), varDataLen(val));
- }
-
- }
-
- int32_t ret = 0;
- if (val == NULL) { //the val is possible to be null, so check it out carefully
- ret = -1; // val is missing in table tags value pairs
- } else {
- ret = pInfo->compare(val, pInfo->q);
- }
-
- switch (pInfo->optr) {
- case TSDB_RELATION_EQUAL: {
- return ret == 0;
- }
- case TSDB_RELATION_NOT_EQUAL: {
- return ret != 0;
- }
- case TSDB_RELATION_GREATER_EQUAL: {
- return ret >= 0;
- }
- case TSDB_RELATION_GREATER: {
- return ret > 0;
- }
- case TSDB_RELATION_LESS_EQUAL: {
- return ret <= 0;
- }
- case TSDB_RELATION_LESS: {
- return ret < 0;
- }
- case TSDB_RELATION_LIKE: {
- return ret == 0;
- }
- case TSDB_RELATION_IN: {
- return ret == 1;
- }
-
- default:
- assert(false);
- }
-
- return true;
-}
-
-static void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param);
-
-static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) {
- // query according to the expression tree
- SExprTraverseSupp supp = {
- .nodeFilterFn = (__result_filter_fn_t) tableFilterFp,
- .setupInfoFn = filterPrepare,
- .pExtInfo = pSTable->tagSchema,
- };
-
- getTableListfromSkipList(pExpr, pSTable->pIndex, pRes, &supp);
- tExprTreeDestroy(pExpr, destroyHelper);
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
- int16_t tagNameRelType, const char* tbnameCond, STableGroupInfo* pGroupInfo,
- SColIndex* pColIndex, int32_t numOfCols) {
+int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
+ STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) {
if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;
STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid);
@@ -3750,7 +3651,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
STSchema* pTagSchema = tsdbGetTableTagSchema(pTable);
// no tags and tbname condition, all child tables of this stable are involved
- if (tbnameCond == NULL && (pTagCond == NULL || len == 0)) {
+ if (pTagCond == NULL || len == 0) {
int32_t ret = getAllTableList(pTable, res);
if (ret != TSDB_CODE_SUCCESS) {
tsdbUnlockRepoMeta(tsdb);
@@ -3772,25 +3673,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
tExprNode* expr = NULL;
TRY(TSDB_MAX_TAG_CONDITIONS) {
- expr = exprTreeFromTableName(tbnameCond);
- if (expr == NULL) {
- expr = exprTreeFromBinary(pTagCond, len);
- } else {
- CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
- tExprNode* tagExpr = exprTreeFromBinary(pTagCond, len);
- if (tagExpr != NULL) {
- CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, tagExpr, NULL);
- tExprNode* tbnameExpr = expr;
- expr = calloc(1, sizeof(tExprNode));
- if (expr == NULL) {
- THROW( TSDB_CODE_TDB_OUT_OF_MEMORY );
- }
- expr->nodeType = TSQL_NODE_EXPR;
- expr->_node.optr = (uint8_t)tagNameRelType;
- expr->_node.pLeft = tagExpr;
- expr->_node.pRight = tbnameExpr;
- }
- }
+ expr = exprTreeFromBinary(pTagCond, len);
CLEANUP_EXECUTE();
} CATCH( code ) {
@@ -3802,7 +3685,20 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
// TODO: more error handling
} END_TRY
- doQueryTableList(pTable, res, expr);
+ void *filterInfo = NULL;
+
+ ret = filterInitFromTree(expr, &filterInfo, 0);
+ if (ret != TSDB_CODE_SUCCESS) {
+ terrno = ret;
+ goto _error;
+ }
+
+ tsdbQueryTableList(pTable, res, filterInfo);
+
+ filterFreeInfo(filterInfo);
+
+ tExprTreeDestroy(expr, NULL);
+
pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
@@ -3986,251 +3882,115 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
pGroupList->numOfTables = 0;
}
-static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SArray *pResult, SExprTraverseSupp *param) {
- SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
- // Scan each node in the skiplist by using iterator
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
- if (exprTreeApplyFilter(pExpr, pNode, param)) {
- taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode)));
- }
- }
-
- tSkipListDestroyIter(iter);
-}
-
-typedef struct {
- char* v;
- int32_t optr;
-} SEndPoint;
-
-typedef struct {
- SEndPoint* start;
- SEndPoint* end;
-} SQueryCond;
-
-// todo check for malloc failure
-static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) {
- int32_t optr = queryColInfo->optr;
-
- if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL ||
- optr == TSDB_RELATION_EQUAL || optr == TSDB_RELATION_NOT_EQUAL) {
- pCond->start = calloc(1, sizeof(SEndPoint));
- pCond->start->optr = queryColInfo->optr;
- pCond->start->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
- pCond->end = calloc(1, sizeof(SEndPoint));
- pCond->end->optr = queryColInfo->optr;
- pCond->end->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_IN) {
- pCond->start = calloc(1, sizeof(SEndPoint));
- pCond->start->optr = queryColInfo->optr;
- pCond->start->v = queryColInfo->q;
- } else if (optr == TSDB_RELATION_LIKE) {
- assert(0);
+static FORCE_INLINE int32_t tsdbGetTagDataFromId(void *param, int32_t id, void **data) {
+ STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode *)param));
+
+ if (id == TSDB_TBNAME_COLUMN_INDEX) {
+ *data = TABLE_NAME(pTable);
+ } else {
+ *data = tdGetKVRowValOfCol(pTable->tagVal, id);
}
return TSDB_CODE_SUCCESS;
}
-static void queryIndexedColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) {
- SSkipListIterator* iter = NULL;
-
- SQueryCond cond = {0};
- if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) {
- //todo handle error
- }
-
- if (cond.start != NULL) {
- iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_ASC);
- } else {
- iter = tSkipListCreateIterFromVal(pSkipList, (char*)(cond.end ? cond.end->v: NULL), pSkipList->type, TSDB_ORDER_DESC);
- }
-
- if (cond.start != NULL) {
- int32_t optr = cond.start->optr;
-
- if (optr == TSDB_RELATION_EQUAL) { // equals
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
- int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- if (ret != 0) {
- break;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
- } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal
- bool comp = true;
- int32_t ret = 0;
-
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
-
- if (comp) {
- ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- assert(ret >= 0);
- }
- if (ret == 0 && optr == TSDB_RELATION_GREATER) {
- continue;
- } else {
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- comp = false;
- }
- }
- } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal
- bool comp = true;
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
- comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
- if (comp) {
- continue;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
-
- tSkipListDestroyIter(iter);
-
- comp = true;
- iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->type, TSDB_ORDER_DESC);
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
- comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
- if (comp) {
- continue;
- }
-
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
+static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* res) {
+ SSkipListIterator* iter = NULL;
+ char *startVal = NULL;
+ int32_t order = 0;
+ int32_t inRange = 0;
+ int32_t flag = 0;
+ bool all = false;
+ int8_t *addToResult = NULL;
- } else if (optr == TSDB_RELATION_IN) {
- while(tSkipListIterNext(iter)) {
- SSkipListNode* pNode = tSkipListIterGet(iter);
+ filterGetIndexedColumnInfo(filterInfo, &startVal, &order, &flag);
- int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
- if (ret != 0) {
- break;
- }
+ tsdbDebug("filter index column start, order:%d, flag:%d", order, flag);
- STableKeyInfo info = {.pTable = (void*)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
-
+ while (order) {
+ if (FILTER_GET_FLAG(order, TSDB_ORDER_ASC)) {
+ iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_ASC);
+ FILTER_CLR_FLAG(order, TSDB_ORDER_ASC);
} else {
- assert(0);
+ iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_DESC);
+ FILTER_CLR_FLAG(order, TSDB_ORDER_DESC);
}
- } else {
- int32_t optr = cond.end ? cond.end->optr : TSDB_RELATION_INVALID;
- if (optr == TSDB_RELATION_LESS || optr == TSDB_RELATION_LESS_EQUAL) {
- bool comp = true;
- int32_t ret = 0;
-
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
-
- if (comp) {
- ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.end->v);
- assert(ret <= 0);
- }
+
+ while (tSkipListIterNext(iter)) {
+ SSkipListNode *pNode = tSkipListIterGet(iter);
- if (ret == 0 && optr == TSDB_RELATION_LESS) {
- continue;
- } else {
- STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- comp = false; // no need to compare anymore
- }
+ if (inRange == 0 || !FILTER_GET_FLAG(flag, FI_ACTION_NO_NEED)) {
+ tsdbDebug("filter index column, filter it");
+ filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId);
+ all = filterExecute(filterInfo, 1, &addToResult, NULL, 0);
}
- } else {
- assert(pQueryInfo->optr == TSDB_RELATION_ISNULL || pQueryInfo->optr == TSDB_RELATION_NOTNULL);
+
+ char *pData = SL_GET_NODE_DATA(pNode);
- while (tSkipListIterNext(iter)) {
- SSkipListNode *pNode = tSkipListIterGet(iter);
+ tsdbDebug("filter index column, table:%s, result:%d", ((STable *)pData)->name->data, all);
- bool isnull = isNull(SL_GET_NODE_KEY(pSkipList, pNode), pQueryInfo->sch.type);
- if ((pQueryInfo->optr == TSDB_RELATION_ISNULL && isnull) ||
- (pQueryInfo->optr == TSDB_RELATION_NOTNULL && (!isnull))) {
- STableKeyInfo info = {.pTable = (void *)SL_GET_NODE_DATA(pNode), .lastKey = TSKEY_INITIAL_VAL};
- taosArrayPush(result, &info);
- }
+ if (all || (addToResult && *addToResult)) {
+ STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL};
+ taosArrayPush(res, &info);
+ inRange = 1;
+ } else if (inRange){
+ break;
}
}
+
+ inRange = 0;
+
+ tfree(addToResult);
+ tSkipListDestroyIter(iter);
}
- free(cond.start);
- free(cond.end);
- tSkipListDestroyIter(iter);
+ tsdbDebug("filter index column end");
}
-static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* res, __result_filter_fn_t filterFp) {
+static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray* res) {
SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
+ int8_t *addToResult = NULL;
while (tSkipListIterNext(iter)) {
- bool addToResult = false;
SSkipListNode *pNode = tSkipListIterGet(iter);
+ filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId);
+
char *pData = SL_GET_NODE_DATA(pNode);
- tstr *name = (tstr*) tsdbGetTableName((void*) pData);
-
- // todo speed up by using hash
- if (pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
- if (pQueryInfo->optr == TSDB_RELATION_IN) {
- addToResult = pQueryInfo->compare(name, pQueryInfo->q);
- } else if (pQueryInfo->optr == TSDB_RELATION_LIKE) {
- addToResult = !pQueryInfo->compare(name, pQueryInfo->q);
- }
- } else {
- addToResult = filterFp(pNode, pQueryInfo);
- }
- if (addToResult) {
+ bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0);
+
+ if (all || (addToResult && *addToResult)) {
STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL};
taosArrayPush(res, &info);
- }
+ }
}
+ tfree(addToResult);
+
tSkipListDestroyIter(iter);
}
-// Apply the filter expression to each node in the skiplist to acquire the qualified nodes in skip list
-void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *result, SExprTraverseSupp *param) {
- if (pExpr == NULL) {
- return;
- }
-
- tExprNode *pLeft = pExpr->_node.pLeft;
- tExprNode *pRight = pExpr->_node.pRight;
- // column project
- if (pLeft->nodeType != TSQL_NODE_EXPR && pRight->nodeType != TSQL_NODE_EXPR) {
- assert(pLeft->nodeType == TSQL_NODE_COL && (pRight->nodeType == TSQL_NODE_VALUE || pRight->nodeType == TSQL_NODE_DUMMY));
-
- param->setupInfoFn(pExpr, param->pExtInfo);
+static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo) {
+ STSchema* pTSSchema = pTable->tagSchema;
+ bool indexQuery = false;
+ SSkipList *pSkipList = pTable->pIndex;
+
+ filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery);
+
+ if (indexQuery) {
+ queryIndexedColumn(pSkipList, filterInfo, pRes);
+ } else {
+ queryIndexlessColumn(pSkipList, filterInfo, pRes);
+ }
- tQueryInfo *pQueryInfo = pExpr->_node.info;
- if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_IN)) {
- queryIndexedColumn(pSkipList, pQueryInfo, result);
- } else {
- queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn);
- }
+ return TSDB_CODE_SUCCESS;
+}
- return;
- }
- // The value of hasPK is always 0.
- uint8_t weight = pLeft->_node.hasPK + pRight->_node.hasPK;
- assert(weight == 0 && pSkipList != NULL && taosArrayGetSize(result) == 0);
- //apply the hierarchical filter expression to every node in skiplist to find the qualified nodes
- applyFilterToSkipListNode(pSkipList, pExpr, result, param);
-}
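
The deleted skiplist comparators above are replaced by the generic qFilter engine; the per-operator walking logic collapses into a bind-then-evaluate loop. A condensed outline of the new flow inside tsdbQuerySTableByTagCond and the two query helpers (function names are from the patch; this glue is an illustrative condensation, not compilable in isolation):

    /* 1. build filter state once from the deserialized expression tree */
    void *filterInfo = NULL;
    if (filterInitFromTree(expr, &filterInfo, 0) != TSDB_CODE_SUCCESS) goto _error;

    /* 2. per skiplist node: bind tag data, then evaluate the whole
     *    predicate in a single call (indexed or full scan alike)      */
    filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId);
    bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0);
    if (all || (addToResult && *addToResult)) { /* node qualifies */ }

    /* 3. tear down filter state and the expression tree               */
    filterFreeInfo(filterInfo);
    tExprTreeDestroy(expr, NULL);
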
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index e41b544d00e55f7eece904c5957ef9c06063e6c3..40069d7d273caa14ce3b80467b25d68ea476fb75 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -33,6 +33,7 @@ extern "C" {
#endif
typedef void (*__cache_free_fn_t)(void*);
+typedef void (*__cache_trav_fn_t)(void*, void*);
typedef struct SCacheStatis {
int64_t missCount;
@@ -176,7 +177,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj);
* @param fp
* @return
*/
-void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp);
+void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1);
/**
* stop background refresh worker thread
diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h
index cf61b7165a47e0e46371c7853337702cd9fb0d45..1125516d34c65da1b5d0c47dadd126aa0b1959fa 100644
--- a/src/util/inc/tcompare.h
+++ b/src/util/inc/tcompare.h
@@ -25,7 +25,8 @@ extern "C" {
#define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2
-#define TSDB_PATTERN_STRING_MAX_LEN 100
+#define TSDB_PATTERN_STRING_DEFAULT_LEN 100
+#define TSDB_REGEX_STRING_DEFAULT_LEN 128
#define FLT_COMPAR_TOL_FACTOR 4
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))
@@ -47,7 +48,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con
int32_t doCompare(const char* a, const char* b, int32_t type, size_t size);
-__compar_fn_t getKeyComparFunc(int32_t keyType);
+__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
@@ -82,6 +83,9 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight);
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight);
int32_t compareStrPatternComp(const void* pLeft, const void* pRight);
+int32_t compareStrRegexComp(const void* pLeft, const void* pRight);
+int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight);
+int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight);
int32_t compareFindItemInSet(const void *pLeft, const void* pRight);
int32_t compareWStrPatternComp(const void* pLeft, const void* pRight);
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index f146ec0b8b675527b41dfb2267946193e5e5fe89..2c632d4a17f5394dc28df72414948855b89bc001 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-#define TSDB_CFG_MAX_NUM 116 // 110 + 6 with lossy option
+#define TSDB_CFG_MAX_NUM 123
#define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41
@@ -81,7 +81,6 @@ typedef struct {
extern SGlobalCfg tsGlobalConfig[];
extern int32_t tsGlobalConfigNum;
extern char * tsCfgStatusStr[];
-extern bool tsdbForceKeepFile;
void taosReadGlobalLogCfg();
bool taosReadGlobalCfg();
@@ -90,6 +89,7 @@ void taosDumpGlobalCfg();
void taosInitConfigOption(SGlobalCfg cfg);
SGlobalCfg * taosGetConfigOption(const char *option);
+bool taosReadConfigOption(const char *option, char *value, char *value2, char *value3, int8_t cfgStatus, int8_t sourceType);
#ifdef __cplusplus
}
diff --git a/src/util/inc/tlosertree.h b/src/util/inc/tlosertree.h
index 4c731625dd5c7950c321b2180ca913e49362059b..58f2ca8c5c81408b35c2c9435357deeb2b0f13a4 100644
--- a/src/util/inc/tlosertree.h
+++ b/src/util/inc/tlosertree.h
@@ -26,7 +26,7 @@ typedef int (*__merge_compare_fn_t)(const void *, const void *, void *param);
typedef struct SLoserTreeNode {
int32_t index;
- void * pData;
+ void *pData;
} SLoserTreeNode;
typedef struct SLoserTreeInfo {
@@ -34,8 +34,7 @@ typedef struct SLoserTreeInfo {
int32_t totalEntries;
__merge_compare_fn_t comparFn;
void * param;
-
- SLoserTreeNode *pNode;
+ SLoserTreeNode *pNode;
} SLoserTreeInfo;
uint32_t tLoserTreeCreate(SLoserTreeInfo **pTree, int32_t numOfEntries, void *param, __merge_compare_fn_t compareFn);
diff --git a/src/util/inc/tnettest.h b/src/util/inc/tnettest.h
index b7585bd7155421d1f22e5f989dc7d1ae6f8be491..8a03b67628ffd460a4aa95ad4de8110b71472496 100644
--- a/src/util/inc/tnettest.h
+++ b/src/util/inc/tnettest.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-void taosNetTest(char *role, char *host, int port, int pkgLen);
+void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen, int32_t pkgNum, char *pkgType);
#ifdef __cplusplus
}
diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h
new file mode 100644
index 0000000000000000000000000000000000000000..7443ad706dcbef529d857fe823cddd0cc1efbdd3
--- /dev/null
+++ b/src/util/inc/tthread.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TTHREAD_H
+#define TDENGINE_TTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "os.h"
+#include "taosdef.h"
+
+// create new thread
+pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param);
+// destroy thread
+bool taosDestoryThread(pthread_t* pthread);
+// returns true if the thread is running
+bool taosThreadRunning(pthread_t* pthread);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_TTHREAD_H
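
A minimal usage sketch for this header (the worker body and the driver are illustrative; taosDestoryThread keeps the upstream spelling of the identifier):

    #include "tthread.h"
    #include <stdio.h>

    static void* worker(void* param) {
      printf("worker started, param=%p\n", param);
      return NULL;
    }

    /* Hypothetical caller, not part of the patch. */
    static void runWorkerOnce(void) {
      pthread_t* pt = taosCreateThread(worker, NULL);
      if (pt == NULL) return;               /* creation failed               */
      if (taosThreadRunning(pt)) {
        printf("worker still running\n");
      }
      taosDestoryThread(pt);                /* stop/join and free the handle */
    }
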
diff --git a/src/util/inc/tutil.h b/src/util/inc/tutil.h
index 7c8fd2ed8353a086ec9f145aaaa30f4db57113cb..6bcfb5de295c5719032b81c23d16ec2b1476349e 100644
--- a/src/util/inc/tutil.h
+++ b/src/util/inc/tutil.h
@@ -32,6 +32,7 @@ char * strnchr(char *haystack, char needle, int32_t len, bool skipquote);
char ** strsplit(char *src, const char *delim, int32_t *num);
char * strtolower(char *dst, const char *src);
char * strntolower(char *dst, const char *src, int32_t n);
+char * strntolower_s(char *dst, const char *src, int32_t n);
int64_t strnatoi(char *num, int32_t len);
char * strbetween(char *string, char *begin, char *end);
char * paGetToken(char *src, char **token, int32_t *tokenLen);
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index a22ce34a0e3030f409948cfcf3e739335d6417cb..6577a0a0f4710951cf240f792d68f1afb2d37569 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -741,17 +741,19 @@ void taosHashTableResize(SHashObj *pHashObj) {
}
SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
- SHashNode *pNewNode = calloc(1, sizeof(SHashNode) + keyLen + dsize);
+ SHashNode *pNewNode = malloc(sizeof(SHashNode) + keyLen + dsize);
if (pNewNode == NULL) {
uError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
- pNewNode->keyLen = (uint32_t)keyLen;
+ pNewNode->keyLen = (uint32_t)keyLen;
pNewNode->hashVal = hashVal;
pNewNode->dataLen = (uint32_t) dsize;
- pNewNode->count = 1;
+ pNewNode->count = 1;
+ pNewNode->removed = 0;
+ pNewNode->next = NULL;
memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen);
diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c
index d0d126c1e4d7f2e7c0913585df6031b556291fc3..007ce0682974d06bf506a82d8bbbc809092eb9e4 100644
--- a/src/util/src/tarray.c
+++ b/src/util/src/tarray.c
@@ -24,11 +24,12 @@ void* taosArrayInit(size_t size, size_t elemSize) {
size = TARRAY_MIN_SIZE;
}
- SArray* pArray = calloc(1, sizeof(SArray));
+ SArray* pArray = malloc(sizeof(SArray));
if (pArray == NULL) {
return NULL;
}
+ pArray->size = 0;
pArray->pData = calloc(size, elemSize);
if (pArray->pData == NULL) {
free(pArray);
@@ -112,14 +113,15 @@ void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfEle
i += 1;
}
- assert(i == pData[numOfElems - 1] + 1);
+ assert(i == pData[numOfElems - 1] + 1 && i <= size);
- int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1;
int32_t srcIndex = pData[numOfElems - 1] + 1;
-
- char* dst = TARRAY_GET_ELEM(pArray, dstIndex);
- char* src = TARRAY_GET_ELEM(pArray, srcIndex);
- memmove(dst, src, pArray->elemSize * (pArray->size - numOfElems));
+ int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1;
+ if (pArray->size - srcIndex > 0) {
+ char* dst = TARRAY_GET_ELEM(pArray, dstIndex);
+ char* src = TARRAY_GET_ELEM(pArray, srcIndex);
+ memmove(dst, src, pArray->elemSize * (pArray->size - srcIndex));
+ }
pArray->size -= numOfElems;
}
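
The memmove above previously shifted (size - numOfElems) elements, which over-reads whenever the removed run is not at the tail; the fix shifts exactly the (size - srcIndex) elements that follow the removed run, and skips the move when nothing follows. A worked example with illustrative values:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      int a[10] = {0,1,2,3,4,5,6,7,8,9};
      int removed[] = {3,4,5};                           /* contiguous run    */
      int numOfElems = 3, size = 10;
      int srcIndex = removed[numOfElems - 1] + 1;        /* 6                 */
      int dstIndex = removed[numOfElems - 1] - numOfElems + 1; /* 3           */
      if (size - srcIndex > 0) {
        /* shift the 4 trailing elements (6..9), not 7 as before */
        memmove(&a[dstIndex], &a[srcIndex], sizeof(int) * (size - srcIndex));
      }
      size -= numOfElems;
      for (int i = 0; i < size; i++) printf("%d ", a[i]); /* 0 1 2 6 7 8 9 */
      printf("\n");
      return 0;
    }
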
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 69b3741e13c9e0b3ee00615a29851a3f690a1e84..589d3d4fa57c42b472319673a72d2e7ab599689f 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -71,6 +71,8 @@ static pthread_once_t cacheThreadInit = PTHREAD_ONCE_INIT;
static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
static SArray* pCacheArrayList = NULL;
static bool stopRefreshWorker = false;
+static bool refreshWorkerNormalStopped = false;
+static bool refreshWorkerUnexpectedStopped = false;
static void doInitRefreshThread(void) {
pCacheArrayList = taosArrayInit(4, POINTER_BYTES);
@@ -503,7 +505,8 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
typedef struct SHashTravSupp {
SCacheObj* pCacheObj;
int64_t time;
- __cache_free_fn_t fp;
+ __cache_trav_fn_t fp;
+ void* param1;
} SHashTravSupp;
static bool travHashTableEmptyFn(void* param, void* data) {
@@ -537,7 +540,10 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
// wait for the refresh thread quit before destroying the cache object.
+ // In a DLL, however, the child thread is killed before atexit takes effect.
while(atomic_load_8(&pCacheObj->deleting) != 0) {
+ if (refreshWorkerNormalStopped) break;
+ if (refreshWorkerUnexpectedStopped) return;
taosMsleep(50);
}
@@ -640,7 +646,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
// todo memory leak if there are object with refcount greater than 0 in hash table?
taosHashCleanup(pCacheObj->pHashTable);
- taosTrashcanEmpty(pCacheObj, false);
+ taosTrashcanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj);
@@ -662,20 +668,26 @@ bool travHashTableFn(void* param, void* data) {
}
if (ps->fp) {
- (ps->fp)(pNode->data);
+ (ps->fp)(pNode->data, ps->param1);
}
// do not remove element in hash table
return true;
}
-static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) {
+static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) {
assert(pCacheObj != NULL);
- SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time};
+ SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1};
taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup);
}
+void taosCacheRefreshWorkerUnexpectedStopped(void) {
+ if(!refreshWorkerNormalStopped) {
+ refreshWorkerUnexpectedStopped=true;
+ }
+}
+
void* taosCacheTimedRefresh(void *handle) {
assert(pCacheArrayList != NULL);
uDebug("cache refresh thread starts");
@@ -684,6 +696,7 @@ void* taosCacheTimedRefresh(void *handle) {
const int32_t SLEEP_DURATION = 500; //500 ms
int64_t count = 0;
+ atexit(taosCacheRefreshWorkerUnexpectedStopped);
while(1) {
taosMsleep(SLEEP_DURATION);
@@ -736,7 +749,7 @@ void* taosCacheTimedRefresh(void *handle) {
// refresh data in hash table
if (elemInHash > 0) {
int64_t now = taosGetTimestampMs();
- doCacheRefresh(pCacheObj, now, NULL);
+ doCacheRefresh(pCacheObj, now, NULL, NULL);
}
taosTrashcanEmpty(pCacheObj, false);
@@ -748,20 +761,21 @@ void* taosCacheTimedRefresh(void *handle) {
pCacheArrayList = NULL;
pthread_mutex_destroy(&guard);
+ refreshWorkerNormalStopped=true;
uDebug("cache refresh thread quits");
return NULL;
}
-void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) {
+void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1) {
if (pCacheObj == NULL) {
return;
}
int64_t now = taosGetTimestampMs();
- doCacheRefresh(pCacheObj, now, fp);
+ doCacheRefresh(pCacheObj, now, fp, param1);
}
-void taosStopCacheRefreshWorker() {
- stopRefreshWorker = false;
+void taosStopCacheRefreshWorker(void) {
+ stopRefreshWorker = true;
}
\ No newline at end of file
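
The refresh callback now takes a second argument so a caller-supplied context travels with each traversed entry (threaded through SHashTravSupp.param1). A hedged sketch of the new callback shape (the callback body, context type, and driver are illustrative):

    #include <stdio.h>

    typedef void (*__cache_trav_fn_t)(void*, void*);

    /* Illustrative callback: param1 is whatever the caller handed to
     * taosCacheRefresh(pCacheObj, fp, param1). */
    static void expireIfOlder(void* entry, void* param1) {
      long long cutoffMs = *(long long*)param1;
      printf("visit entry %p with cutoff %lld\n", entry, cutoffMs);
    }

    int main(void) {
      long long cutoffMs = 1700000000000LL;
      int fakeEntry = 0;
      __cache_trav_fn_t fp = expireIfOlder;
      fp(&fakeEntry, &cutoffMs);   /* what travHashTableFn does per node */
      return 0;
    }
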
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index a3c01d2be79074203744d0028d7e8dd143de9c24..179fbd05a5a8f5ddfb28b68130f87e26ed4e522f 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -12,57 +12,78 @@
* You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define _BSD_SOURCE
+#define _GNU_SOURCE
+#define _XOPEN_SOURCE
+#define _DEFAULT_SOURCE
-#include "os.h"
-#include "ttype.h"
#include "tcompare.h"
-#include "tarray.h"
+#include "tulog.h"
#include "hash.h"
+#include "regex.h"
+#include "os.h"
+#include "ttype.h"
int32_t setCompareBytes1(const void *pLeft, const void *pRight) {
- return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0;
+ return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0;
}
int32_t setCompareBytes2(const void *pLeft, const void *pRight) {
- return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0;
+ return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0;
}
int32_t setCompareBytes4(const void *pLeft, const void *pRight) {
- return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0;
+ return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0;
}
int32_t setCompareBytes8(const void *pLeft, const void *pRight) {
- return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0;
+ return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0;
}
-int32_t compareInt32Val(const void *pLeft, const void *pRight) {
- int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
+int32_t compareInt8Val(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft), right = GET_INT8_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
-int32_t compareInt64Val(const void *pLeft, const void *pRight) {
- int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
+int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) {
+ return compareInt8Val(pRight, pLeft);
+}
+
+int32_t compareInt16Val(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
-int32_t compareInt16Val(const void *pLeft, const void *pRight) {
- int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight);
+int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) {
+ return compareInt16Val(pRight, pLeft);
+}
+
+int32_t compareInt32Val(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
-int32_t compareInt8Val(const void *pLeft, const void *pRight) {
- int8_t left = GET_INT8_VAL(pLeft), right = GET_INT8_VAL(pRight);
+int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) {
+ return compareInt32Val(pRight, pLeft);
+}
+
+int32_t compareInt64Val(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight);
if (left > right) return 1;
if (left < right) return -1;
return 0;
}
+int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) {
+ return compareInt64Val(pRight, pLeft);
+}
+
int32_t compareUint32Val(const void *pLeft, const void *pRight) {
uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight);
if (left > right) return 1;
@@ -70,6 +91,10 @@ int32_t compareUint32Val(const void *pLeft, const void *pRight) {
return 0;
}
+int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) {
+ return compareUint32Val(pRight, pLeft);
+}
+
int32_t compareUint64Val(const void *pLeft, const void *pRight) {
uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight);
if (left > right) return 1;
@@ -77,6 +102,10 @@ int32_t compareUint64Val(const void *pLeft, const void *pRight) {
return 0;
}
+int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) {
+ return compareUint64Val(pRight, pLeft);
+}
+
int32_t compareUint16Val(const void *pLeft, const void *pRight) {
uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight);
if (left > right) return 1;
@@ -84,6 +113,10 @@ int32_t compareUint16Val(const void *pLeft, const void *pRight) {
return 0;
}
+int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) {
+ return compareUint16Val(pRight, pLeft);
+}
+
int32_t compareUint8Val(const void* pLeft, const void* pRight) {
uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight);
if (left > right) return 1;
@@ -91,6 +124,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) {
return 0;
}
+int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) {
+ return compareUint8Val(pRight, pLeft);
+}
+
int32_t compareFloatVal(const void *pLeft, const void *pRight) {
float p1 = GET_FLOAT_VAL(pLeft);
float p2 = GET_FLOAT_VAL(pRight);
@@ -108,8 +145,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
- }
- return FLT_GREATER(p1, p2) ? 1: -1;
+ }
+ return FLT_GREATER(p1, p2) ? 1: -1;
+}
+
+int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
+ return compareFloatVal(pRight, pLeft);
}
int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
@@ -129,14 +170,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
- }
- return FLT_GREATER(p1, p2) ? 1: -1;
+ }
+ return FLT_GREATER(p1, p2) ? 1: -1;
+}
+
+int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
+ return compareDoubleVal(pRight, pLeft);
}
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
-
+
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
@@ -149,14 +194,18 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
}
}
+int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) {
+ return compareLenPrefixedStr(pRight, pLeft);
+}
+
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
-
+
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
- int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE);
+ int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1);
if (ret == 0) {
return 0;
} else {
@@ -165,6 +214,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
}
}
+int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
+ return compareLenPrefixedWStr(pRight, pLeft);
+}
+
/*
* Compare two strings
* TSDB_MATCH: Match
@@ -177,52 +230,62 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
*/
int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) {
char c, c1;
-
+
int32_t i = 0;
int32_t j = 0;
-
+ int32_t o = 0;
+ int32_t m = 0;
+
while ((c = patterStr[i++]) != 0) {
if (c == pInfo->matchAll) { /* Match "*" */
-
+
while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) {
- if (c == pInfo->matchOne && (j > size || str[j++] == 0)) {
- // empty string, return not match
- return TSDB_PATTERN_NOWILDCARDMATCH;
+ if (c == pInfo->matchOne) {
+ if (j > size || str[j++] == 0) {
+ // empty string, return not match
+ return TSDB_PATTERN_NOWILDCARDMATCH;
+ } else {
+ ++o;
+ }
}
}
-
+
if (c == 0) {
return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */
}
-
+
char next[3] = {toupper(c), tolower(c), 0};
+ m = o;
while (1) {
- size_t n = strcspn(str, next);
- str += n;
-
+ size_t n = strcspn(str + m, next);
+ str += m + n;
+
if (str[0] == 0 || (n >= size)) {
break;
}
-
+
int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
}
+ m = 0;
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
-
+
c1 = str[j++];
+ ++o;
if (j <= size) {
+ if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue;
}
}
-
+
return TSDB_PATTERN_NOMATCH;
}
-
+
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
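For reference, the "\_" escape added above applies only on the literal-matching path, not inside a '%' run. A minimal standalone driver, assuming the TDengine header (tcompare.h is an assumption) that declares SPatternCompareInfo, patternMatch, and the TSDB_PATTERN_* codes:

    // Hedged sketch: exercises patternMatch() as modified above.
    #include <stdio.h>
    #include <string.h>
    #include "tcompare.h"  // SPatternCompareInfo, patternMatch, TSDB_PATTERN_MATCH (assumed header)

    int main(void) {
      SPatternCompareInfo info = {'%', '_'};  // matchAll, matchOne
      const char *str = "temperature_sensor";

      // Escaped wildcard: "\_" must match a literal '_' in str.
      int r1 = patternMatch("temperature\\_sensor", str, strlen(str), &info);
      // Trailing '%': matches any suffix.
      int r2 = patternMatch("temp%", str, strlen(str), &info);

      printf("%d %d\n", r1 == TSDB_PATTERN_MATCH, r2 == TSDB_PATTERN_MATCH);  // 1 1
      return 0;
    }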
@@ -230,48 +293,48 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
wchar_t c, c1;
wchar_t matchOne = L'_'; // "_"
wchar_t matchAll = L'%'; // "%"
-
+
int32_t i = 0;
int32_t j = 0;
-
+
while ((c = patterStr[i++]) != 0) {
if (c == matchAll) { /* Match "%" */
-
+
while ((c = patterStr[i++]) == matchAll || c == matchOne) {
- if (c == matchOne && (j > size || str[j++] == 0)) {
+ if (c == matchOne && (j >= size || str[j++] == 0)) {
return TSDB_PATTERN_NOWILDCARDMATCH;
}
}
if (c == 0) {
return TSDB_PATTERN_MATCH;
}
-
+
wchar_t accept[3] = {towupper(c), towlower(c), 0};
while (1) {
size_t n = wcscspn(str, accept);
-
+
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
-
+
int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
}
}
-
+
return TSDB_PATTERN_NOWILDCARDMATCH;
}
-
+
c1 = str[j++];
-
+
if (j <= size) {
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
continue;
}
}
-
+
return TSDB_PATTERN_NOMATCH;
}
@@ -296,6 +359,51 @@ int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
+int32_t compareStrRegexCompMatch(const void* pLeft, const void* pRight) {
+ return compareStrRegexComp(pLeft, pRight);
+}
+
+int32_t compareStrRegexCompNMatch(const void* pLeft, const void* pRight) {
+ return compareStrRegexComp(pLeft, pRight) ? 0 : 1;
+}
+
+int32_t compareStrRegexComp(const void* pLeft, const void* pRight) {
+  size_t sz = varDataLen(pRight);
+  char *pattern = malloc(sz + 1);
+  if (pattern == NULL) return 1;  // allocation failure treated as no match
+  memcpy(pattern, varDataVal(pRight), sz);
+  pattern[sz] = 0;
+
+  sz = varDataLen(pLeft);
+  char *str = malloc(sz + 1);
+  if (str == NULL) {
+    free(pattern);
+    return 1;
+  }
+  memcpy(str, varDataVal(pLeft), sz);
+  str[sz] = 0;
+
+ int errCode = 0;
+ regex_t regex;
+ char msgbuf[256] = {0};
+
+ int cflags = REG_EXTENDED;
+  if ((errCode = regcomp(&regex, pattern, cflags)) != 0) {
+    regerror(errCode, &regex, msgbuf, sizeof(msgbuf));
+    uError("Failed to compile regex pattern %s. reason %s", pattern, msgbuf);
+    regfree(&regex);
+    free(str);
+    free(pattern);
+    return 1;
+  }
+
+  errCode = regexec(&regex, str, 0, NULL, 0);
+  if (errCode != 0 && errCode != REG_NOMATCH) {
+    regerror(errCode, &regex, msgbuf, sizeof(msgbuf));
+    uDebug("Failed to match %s with pattern %s, reason %s", str, pattern, msgbuf);
+  }
+  int32_t result = (errCode == 0) ? 0 : 1;
+  regfree(&regex);
+  free(str);
+  free(pattern);
+  return result;
+}
+
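Stripped of the varData packing and logging, the compile-exec-free sequence compareStrRegexComp follows is plain POSIX; a self-contained sketch with the same 0-on-match convention:

    #include <regex.h>
    #include <stdio.h>

    // Returns 0 on match, 1 otherwise -- the convention compareStrRegexComp
    // uses, so MATCH tests == 0 and NMATCH inverts it.
    static int regex_match(const char *str, const char *pattern) {
      regex_t regex;
      if (regcomp(&regex, pattern, REG_EXTENDED) != 0) {
        return 1;  // an uncompilable pattern is treated as "no match"
      }
      int err = regexec(&regex, str, 0, NULL, 0);
      regfree(&regex);
      return (err == 0) ? 0 : 1;
    }

    int main(void) {
      printf("%d\n", regex_match("sensor_12", "^sensor_[0-9]+$"));  // 0: match
      printf("%d\n", regex_match("sensor_ab", "^sensor_[0-9]+$"));  // 1: no match
      return 0;
    }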
int32_t taosArrayCompareString(const void* a, const void* b) {
const char* x = *(const char**)a;
const char* y = *(const char**)b;
@@ -303,10 +411,6 @@ int32_t taosArrayCompareString(const void* a, const void* b) {
return compareLenPrefixedStr(x, y);
}
-//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) {
-// const SArray* arr = (const SArray*) pRight;
-// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1;
-//}
int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
}
@@ -315,12 +419,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
- wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
+ wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
+
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@@ -330,26 +435,26 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
- case TSDB_DATA_TYPE_TINYINT:
- case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_UTINYINT:
return setCompareBytes1;
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_USMALLINT:
return setCompareBytes2;
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_UINT:
- case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_FLOAT:
return setCompareBytes4;
- case TSDB_DATA_TYPE_BIGINT:
- case TSDB_DATA_TYPE_UBIGINT:
- case TSDB_DATA_TYPE_DOUBLE:
- case TSDB_DATA_TYPE_TIMESTAMP:
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ case TSDB_DATA_TYPE_DOUBLE:
+ case TSDB_DATA_TYPE_TIMESTAMP:
return setCompareBytes8;
default:
assert(0);
}
}
-
+
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break;
@@ -360,19 +465,27 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
case TSDB_DATA_TYPE_FLOAT: comparFn = compareFloatVal; break;
case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break;
case TSDB_DATA_TYPE_BINARY: {
- if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = compareStrRegexCompMatch;
+ } else if (optr == TSDB_RELATION_NMATCH) {
+ comparFn = compareStrRegexCompNMatch;
+ } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
comparFn = compareStrPatternComp;
} else if (optr == TSDB_RELATION_IN) {
comparFn = compareFindItemInSet;
} else { /* normal relational comparFn */
comparFn = compareLenPrefixedStr;
}
-
+
break;
}
-
+
case TSDB_DATA_TYPE_NCHAR: {
- if (optr == TSDB_RELATION_LIKE) {
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = compareStrRegexCompMatch;
+ } else if (optr == TSDB_RELATION_NMATCH) {
+ comparFn = compareStrRegexCompNMatch;
+ } else if (optr == TSDB_RELATION_LIKE) {
comparFn = compareWStrPatternComp;
} else if (optr == TSDB_RELATION_IN) {
comparFn = compareFindItemInSet;
@@ -391,57 +504,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
comparFn = compareInt32Val;
break;
}
-
+
return comparFn;
}
-__compar_fn_t getKeyComparFunc(int32_t keyType) {
+__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
__compar_fn_t comparFn = NULL;
-
+
switch (keyType) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BOOL:
- comparFn = compareInt8Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc;
break;
case TSDB_DATA_TYPE_SMALLINT:
- comparFn = compareInt16Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc;
break;
case TSDB_DATA_TYPE_INT:
- comparFn = compareInt32Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP:
- comparFn = compareInt64Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc;
break;
case TSDB_DATA_TYPE_FLOAT:
- comparFn = compareFloatVal;
+ comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc;
break;
case TSDB_DATA_TYPE_DOUBLE:
- comparFn = compareDoubleVal;
+ comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc;
break;
case TSDB_DATA_TYPE_UTINYINT:
- comparFn = compareUint8Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc;
break;
case TSDB_DATA_TYPE_USMALLINT:
- comparFn = compareUint16Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc;
break;
case TSDB_DATA_TYPE_UINT:
- comparFn = compareUint32Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc;
break;
case TSDB_DATA_TYPE_UBIGINT:
- comparFn = compareUint64Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc;
break;
case TSDB_DATA_TYPE_BINARY:
- comparFn = compareLenPrefixedStr;
+ comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc;
break;
case TSDB_DATA_TYPE_NCHAR:
- comparFn = compareLenPrefixedWStr;
+ comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc;
break;
default:
- comparFn = compareInt32Val;
+ comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
}
-
+
return comparFn;
}
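Since every descending comparator is just the ascending one with swapped arguments, the new order parameter removes per-direction branching at call sites. A hedged usage sketch; the header names and TSDB_ORDER_DESC are assumed to come from the usual TDengine includes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include "tcompare.h"  // getKeyComparFunc, TSDB_DATA_TYPE_INT (assumed header)
    #include "taosdef.h"   // TSDB_ORDER_DESC (assumed location)

    int main(void) {
      int32_t keys[] = {42, 7, 19, 3, 88};
      size_t n = sizeof(keys) / sizeof(keys[0]);

      // One lookup serves both directions; DESC returns the swapped comparator.
      __compar_fn_t cmp = getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_DESC);
      qsort(keys, n, sizeof(int32_t), cmp);

      for (size_t i = 0; i < n; ++i) printf("%d ", keys[i]);  // 88 42 19 7 3
      printf("\n");
      return 0;
    }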
@@ -465,8 +578,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
}
-
- int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE);
+    int32_t ret = memcmp(t1->data, t2->data, t2->len);
if (ret == 0) {
return ret;
}
@@ -475,7 +587,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
default: { // todo refactor
tstr* t1 = (tstr*) f1;
tstr* t2 = (tstr*) f2;
-
+
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
} else {
diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c
index 5a3dc3f9bcdee41f974e48f22b27beb2a1eb5a35..9ce6876fd6d2c555acf5450a9128f787ccd300c8 100644
--- a/src/util/src/tconfig.c
+++ b/src/util/src/tconfig.c
@@ -26,6 +26,11 @@
SGlobalCfg tsGlobalConfig[TSDB_CFG_MAX_NUM] = {{0}};
int32_t tsGlobalConfigNum = 0;
+// atoi/atof report failure by returning 0, so a zero result is accepted
+// only when the input is literally "0"; anything else is rejected.
+#define ATOI_JUDGE if ( !value && strcmp(input_value, "0") != 0) { \
+    uError("atoi error, input value:%s",input_value); \
+    return false; \
+  }
+
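The guard works because atoi/atof signal failure by returning 0, which is ambiguous only when the input really is "0"; note that a float option written as "0.0" would also be rejected by the strcmp. A tiny demonstration of the check in isolation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Mirrors the ATOI_JUDGE condition as a plain function.
    static int parse_failed(int value, const char *input) {
      return !value && strcmp(input, "0") != 0;
    }

    int main(void) {
      printf("%d\n", parse_failed(atoi("123"), "123"));  // 0: parsed fine
      printf("%d\n", parse_failed(atoi("abc"), "abc"));  // 1: rejected
      printf("%d\n", parse_failed(atoi("0"),   "0"));    // 0: genuine zero
      return 0;
    }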
static char *tsGlobalUnit[] = {
" ",
"(%)",
@@ -44,12 +49,14 @@ char *tsCfgStatusStr[] = {
"program argument list"
};
-static void taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) {
float value = (float)atof(input_value);
+ ATOI_JUDGE
float *option = (float *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%f",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = value;
@@ -57,16 +64,20 @@ static void taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
-static void taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) {
double value = atof(input_value);
+ ATOI_JUDGE
double *option = (double *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%f",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = value;
@@ -74,17 +85,21 @@ static void taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
-static void taosReadInt32Config(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadInt32Config(SGlobalCfg *cfg, char *input_value) {
int32_t value = atoi(input_value);
+ ATOI_JUDGE
int32_t *option = (int32_t *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = value;
@@ -92,16 +107,20 @@ static void taosReadInt32Config(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
-static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadInt16Config(SGlobalCfg *cfg, char *input_value) {
int32_t value = atoi(input_value);
+ ATOI_JUDGE
int16_t *option = (int16_t *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = (int16_t)value;
@@ -109,16 +128,20 @@ static void taosReadInt16Config(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
-static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) {
int32_t value = atoi(input_value);
+ ATOI_JUDGE
uint16_t *option = (uint16_t *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = (uint16_t)value;
@@ -126,16 +149,20 @@ static void taosReadUInt16Config(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
-static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadInt8Config(SGlobalCfg *cfg, char *input_value) {
int32_t value = atoi(input_value);
+ ATOI_JUDGE
int8_t *option = (int8_t *)cfg->ptr;
if (value < cfg->minValue || value > cfg->maxValue) {
uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d",
cfg->option, input_value, cfg->minValue, cfg->maxValue, *option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
*option = (int8_t)value;
@@ -143,8 +170,10 @@ static void taosReadInt8Config(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], *option);
+ return false;
}
}
+ return true;
}
static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
@@ -191,12 +220,13 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
return true;
}
-static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) {
uint32_t value = taosInetAddr(input_value);
char * option = (char *)cfg->ptr;
if (value == INADDR_NONE) {
uError("config option:%s, input value:%s, is not a valid ip address, use default value:%s",
cfg->option, input_value, option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
strncpy(option, input_value, cfg->ptrLength);
@@ -204,16 +234,19 @@ static void taosReadIpStrConfig(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], option);
+ return false;
}
}
+ return true;
}
-static void taosReadStringConfig(SGlobalCfg *cfg, char *input_value) {
+static bool taosReadStringConfig(SGlobalCfg *cfg, char *input_value) {
int length = (int) strlen(input_value);
char *option = (char *)cfg->ptr;
if (length <= 0 || length > cfg->ptrLength) {
uError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s",
cfg->option, input_value, cfg->ptrLength, option);
+ return false;
} else {
if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) {
strncpy(option, input_value, cfg->ptrLength);
@@ -221,8 +254,10 @@ static void taosReadStringConfig(SGlobalCfg *cfg, char *input_value) {
} else {
uWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value,
tsCfgStatusStr[cfg->cfgStatus], option);
+ return false;
}
}
+ return true;
}
static void taosReadLogOption(char *option, char *value) {
@@ -258,51 +293,59 @@ SGlobalCfg *taosGetConfigOption(const char *option) {
return NULL;
}
-static void taosReadConfigOption(const char *option, char *value, char *value2, char *value3) {
+bool taosReadConfigOption(const char *option, char *value, char *value2, char *value3,
+ int8_t cfgStatus, int8_t sourceType) {
+ bool ret = false;
for (int i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i;
if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG)) continue;
+ if (sourceType != 0 && !(cfg->cfgType & sourceType)) continue;
if (strcasecmp(cfg->option, option) != 0) continue;
switch (cfg->valType) {
case TAOS_CFG_VTYPE_INT8:
- taosReadInt8Config(cfg, value);
+ ret = taosReadInt8Config(cfg, value);
break;
case TAOS_CFG_VTYPE_INT16:
- taosReadInt16Config(cfg, value);
+ ret = taosReadInt16Config(cfg, value);
break;
case TAOS_CFG_VTYPE_INT32:
- taosReadInt32Config(cfg, value);
+ ret = taosReadInt32Config(cfg, value);
break;
case TAOS_CFG_VTYPE_UINT16:
- taosReadUInt16Config(cfg, value);
+ ret = taosReadUInt16Config(cfg, value);
break;
case TAOS_CFG_VTYPE_FLOAT:
- taosReadFloatConfig(cfg, value);
+ ret = taosReadFloatConfig(cfg, value);
break;
case TAOS_CFG_VTYPE_DOUBLE:
- taosReadDoubleConfig(cfg, value);
+ ret = taosReadDoubleConfig(cfg, value);
break;
case TAOS_CFG_VTYPE_STRING:
- taosReadStringConfig(cfg, value);
+ ret = taosReadStringConfig(cfg, value);
break;
case TAOS_CFG_VTYPE_IPSTR:
- taosReadIpStrConfig(cfg, value);
+ ret = taosReadIpStrConfig(cfg, value);
break;
case TAOS_CFG_VTYPE_DIRECTORY:
- taosReadDirectoryConfig(cfg, value);
+ ret = taosReadDirectoryConfig(cfg, value);
break;
case TAOS_CFG_VTYPE_DATA_DIRCTORY:
if (taosReadDirectoryConfig(cfg, value)) {
- taosReadDataDirCfg(value, value2, value3);
+          taosReadDataDirCfg(value, value2, value3);
+          ret = true;
+        } else {
+          ret = false;
+        }
break;
default:
uError("config option:%s, input value:%s, can't be recognized", option, value);
- break;
+ ret = false;
+ }
+ if(ret && cfgStatus == TAOS_CFG_CSTATUS_OPTION){
+ cfg->cfgStatus = TAOS_CFG_CSTATUS_OPTION;
}
- break;
}
+ return ret;
}
void taosInitConfigOption(SGlobalCfg cfg) {
@@ -336,6 +379,9 @@ void taosReadGlobalLogCfg() {
#elif (_TD_TQ_ == true)
printf("configDir:%s not there, use default value: /etc/tq", configDir);
strcpy(configDir, "/etc/tq");
+ #elif (_TD_PRO_ == true)
+ printf("configDir:%s not there, use default value: /etc/ProDB", configDir);
+ strcpy(configDir, "/etc/ProDB");
#else
printf("configDir:%s not there, use default value: /etc/taos", configDir);
strcpy(configDir, "/etc/taos");
@@ -437,7 +483,7 @@ bool taosReadGlobalCfg() {
if (vlen3 != 0) value3[vlen3] = 0;
}
- taosReadConfigOption(option, value, value2, value3);
+ taosReadConfigOption(option, value, value2, value3, TAOS_CFG_CSTATUS_FILE, 0);
}
fclose(fp);
@@ -560,4 +606,4 @@ void taosDumpGlobalCfg() {
taosDumpCfg(cfg);
}
-}
+}
\ No newline at end of file
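With the widened signature, callers can scope the lookup to a source type and learn whether the value was accepted. A hedged sketch of a command-line override; the option name and the exporting header are illustrative assumptions:

    #include <stdbool.h>
    #include "tconfig.h"  // taosReadConfigOption, TAOS_CFG_CSTATUS_OPTION (assumed header)

    // Marks the option as set from the command line so a later config-file
    // pass cannot silently overwrite it; returns whether the value stuck.
    bool applyCliOverride(const char *name, char *value) {
      return taosReadConfigOption(name, value, NULL, NULL,
                                  TAOS_CFG_CSTATUS_OPTION, 0 /* any source type */);
    }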
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index 49e46cdde8f7689083b65f747df35a13318c4e46..e3d022a6b0a4a929b6c06b2c305fb71b6980a865 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -112,6 +112,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too lon
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES, "Duplicated column names")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH, "Invalid tag length")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, "Invalid column length")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_TAG_NAMES, "Duplicated tag names")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON, "Invalid JSON format")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_JSON_TYPE, "Invalid JSON data type")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range")
// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed")
@@ -194,6 +201,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC, "Invalid func")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_BUFSIZE, "Invalid func bufSize")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TAG_LENGTH, "Invalid tag length")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_COLUMN_LENGTH, "Invalid column length")
+
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, "Database already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION, "Invalid database options")
diff --git a/src/util/src/tfunctional.c b/src/util/src/tfunctional.c
index c470a2b8aefc11141c9125e60c1c45fcbb949f09..8b20f8fc0a6b6e08bcfd501f625c4594b4dff50d 100644
--- a/src/util/src/tfunctional.c
+++ b/src/util/src/tfunctional.c
@@ -14,23 +14,24 @@
*/
#include "tfunctional.h"
-#include "tarray.h"
-
tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) {
tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
+ if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func;
return pSavedFunc;
}
tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) {
tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *));
+ if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func;
return pSavedFunc;
}
tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) {
tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*));
+ if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func;
return pSavedFunc;
}
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index 1ce3eadf58432337511d0d600848ad334b96fc91..0d335ca2664ffee75a79144b97181a5b625df66d 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -85,6 +85,8 @@ int64_t dbgWSize = 0;
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power";
#elif (_TD_TQ_ == true)
char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq";
+#elif (_TD_PRO_ == true)
+char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/ProDB";
#else
char tsLogDir[PATH_MAX] = "/var/log/taos";
#endif
diff --git a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c
index e793548407ad37e2021fdba7db106db3a48fcaf0..0f104c4b63a36880a79ad564a0f837f9b09e7819 100644
--- a/src/util/src/tlosertree.c
+++ b/src/util/src/tlosertree.c
@@ -90,12 +90,13 @@ void tLoserTreeAdjust(SLoserTreeInfo* pTree, int32_t idx) {
SLoserTreeNode kLeaf = pTree->pNode[idx];
while (parentId > 0) {
- if (pTree->pNode[parentId].index == -1) {
+ SLoserTreeNode* pCur = &pTree->pNode[parentId];
+ if (pCur->index == -1) {
pTree->pNode[parentId] = kLeaf;
return;
}
- int32_t ret = pTree->comparFn(&pTree->pNode[parentId], &kLeaf, pTree->param);
+ int32_t ret = pTree->comparFn(pCur, &kLeaf, pTree->param);
if (ret < 0) {
SLoserTreeNode t = pTree->pNode[parentId];
pTree->pNode[parentId] = kLeaf;
diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c
index 0bab7b7e6623ebaa5de6d6511fa1c43719372ef5..2a147ee4f17a38e9a00a6110fcc6f2d21fb7b131 100644
--- a/src/util/src/tnettest.c
+++ b/src/util/src/tnettest.c
@@ -27,6 +27,10 @@
#include "syncMsg.h"
#define MAX_PKG_LEN (64 * 1000)
+#define MAX_SPEED_PKG_LEN (1024 * 1024 * 1024)
+#define MIN_SPEED_PKG_LEN 1024
+#define MAX_SPEED_PKG_NUM 10000
+#define MIN_SPEED_PKG_NUM 1
#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
extern int32_t tsRpcMaxUdpSize;
@@ -466,6 +470,7 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) {
sendpkgLen = pkgLen;
}
+ tsRpcForceTcp = 1;
int32_t ret = taosNetCheckRpc(host, port, sendpkgLen, spi, NULL);
if (ret < 0) {
printf("failed to test TCP port:%d\n", port);
@@ -479,6 +484,7 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) {
sendpkgLen = pkgLen;
}
+ tsRpcForceTcp = 0;
ret = taosNetCheckRpc(host, port, pkgLen, spi, NULL);
if (ret < 0) {
printf("failed to test UDP port:%d\n", port);
@@ -542,12 +548,111 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) {
}
}
-void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
+static void taosNetTestFqdn(char *host) {
+ int code = 0;
+ uint64_t startTime = taosGetTimestampUs();
+ uint32_t ip = taosGetIpv4FromFqdn(host);
+ if (ip == 0xffffffff) {
+ uError("failed to get IP address from %s since %s", host, strerror(errno));
+ code = -1;
+ }
+ uint64_t endTime = taosGetTimestampUs();
+ uint64_t el = endTime - startTime;
+ printf("check convert fqdn spend, status: %d\tcost: %" PRIu64 " us\n", code, el);
+ return;
+}
+
+static void taosNetCheckSpeed(char *host, int32_t port, int32_t pkgLen,
+ int32_t pkgNum, char *pkgType) {
+ // record config
+ int32_t compressTmp = tsCompressMsgSize;
+ int32_t maxUdpSize = tsRpcMaxUdpSize;
+ int32_t forceTcp = tsRpcForceTcp;
+
+ if (0 == strcmp("tcp", pkgType)){
+ tsRpcForceTcp = 1;
+ tsRpcMaxUdpSize = 0; // force tcp
+ } else {
+ tsRpcForceTcp = 0;
+ tsRpcMaxUdpSize = INT_MAX;
+ }
+ tsCompressMsgSize = -1;
+
+ SRpcEpSet epSet;
+ SRpcMsg reqMsg;
+ SRpcMsg rspMsg;
+ void * pRpcConn;
+ char secretEncrypt[32] = {0};
+ char spi = 0;
+ pRpcConn = taosNetInitRpc(secretEncrypt, spi);
+ if (NULL == pRpcConn) {
+ uError("failed to init client rpc");
+ return;
+ }
+
+ printf("check net spend, host:%s port:%d pkgLen:%d pkgNum:%d pkgType:%s\n\n", host, port, pkgLen, pkgNum, pkgType);
+ int32_t totalSucc = 0;
+ uint64_t startT = taosGetTimestampUs();
+ for (int32_t i = 1; i <= pkgNum; i++) {
+ uint64_t startTime = taosGetTimestampUs();
+
+ memset(&epSet, 0, sizeof(SRpcEpSet));
+ epSet.inUse = 0;
+ epSet.numOfEps = 1;
+ epSet.port[0] = port;
+ strcpy(epSet.fqdn[0], host);
+
+ reqMsg.msgType = TSDB_MSG_TYPE_NETWORK_TEST;
+ reqMsg.pCont = rpcMallocCont(pkgLen);
+ reqMsg.contLen = pkgLen;
+ reqMsg.code = 0;
+ reqMsg.handle = NULL; // rpc handle returned to app
+ reqMsg.ahandle = NULL; // app handle set by client
+ strcpy(reqMsg.pCont, "nettest speed");
+
+ rpcSendRecv(pRpcConn, &epSet, &reqMsg, &rspMsg);
+
+ int code = 0;
+ if ((rspMsg.code != 0) || (rspMsg.msgType != TSDB_MSG_TYPE_NETWORK_TEST + 1)) {
+ uError("ret code 0x%x %s", rspMsg.code, tstrerror(rspMsg.code));
+ code = -1;
+ }else{
+ totalSucc ++;
+ }
+
+ rpcFreeCont(rspMsg.pCont);
+
+ uint64_t endTime = taosGetTimestampUs();
+ uint64_t el = endTime - startTime;
+ printf("progress:%5d/%d\tstatus:%d\tcost:%8.2lf ms\tspeed:%8.2lf MB/s\n", i, pkgNum, code, el/1000.0, pkgLen/(el/1000000.0)/1024.0/1024.0);
+ }
+  uint64_t endT = taosGetTimestampUs();
+ uint64_t elT = endT - startT;
+ printf("\ntotal succ:%5d/%d\tcost:%8.2lf ms\tspeed:%8.2lf MB/s\n", totalSucc, pkgNum, elT/1000.0, pkgLen/(elT/1000000.0)/1024.0/1024.0*totalSucc);
+
+ rpcClose(pRpcConn);
+
+ // return config
+ tsCompressMsgSize = compressTmp;
+ tsRpcMaxUdpSize = maxUdpSize;
+ tsRpcForceTcp = forceTcp;
+ return;
+}
+
+void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen,
+ int32_t pkgNum, char *pkgType) {
tscEmbedded = 1;
if (host == NULL) host = tsLocalFqdn;
if (port == 0) port = tsServerPort;
- if (pkgLen <= 10) pkgLen = 1000;
- if (pkgLen > MAX_PKG_LEN) pkgLen = MAX_PKG_LEN;
+ if (0 == strcmp("speed", role)){
+ if (pkgLen <= MIN_SPEED_PKG_LEN) pkgLen = MIN_SPEED_PKG_LEN;
+ if (pkgLen > MAX_SPEED_PKG_LEN) pkgLen = MAX_SPEED_PKG_LEN;
+ if (pkgNum <= MIN_SPEED_PKG_NUM) pkgNum = MIN_SPEED_PKG_NUM;
+ if (pkgNum > MAX_SPEED_PKG_NUM) pkgNum = MAX_SPEED_PKG_NUM;
+ }else{
+ if (pkgLen <= 10) pkgLen = 1000;
+ if (pkgLen > MAX_PKG_LEN) pkgLen = MAX_PKG_LEN;
+ }
if (0 == strcmp("client", role)) {
taosNetTestClient(host, port, pkgLen);
@@ -560,6 +665,12 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
taosNetCheckSync(host, port);
} else if (0 == strcmp("startup", role)) {
taosNetTestStartup(host, port);
+ } else if (0 == strcmp("speed", role)) {
+ tscEmbedded = 0;
+ char type[10] = {0};
+ taosNetCheckSpeed(host, port, pkgLen, pkgNum, strtolower(type, pkgType));
+ }else if (0 == strcmp("fqdn", role)) {
+ taosNetTestFqdn(host);
} else {
taosNetTestStartup(host, port);
}
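The two new parameters let the speed and fqdn roles share the existing entry point. A hedged sketch of driving them; the header name and argument values are assumptions, and pkgLen/pkgNum are clamped to the MIN/MAX_SPEED_PKG_* bounds internally:

    #include "tnettest.h"  // taosNetTest (assumed header)

    void runNetDiagnostics(void) {
      // 1 MB packets, 100 round trips, forced TCP.
      taosNetTest("speed", "localhost", 6030, 1024 * 1024, 100, "tcp");
      // FQDN resolution timing; the packet arguments are ignored for this role.
      taosNetTest("fqdn", "localhost", 0, 0, 0, "tcp");
    }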
diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c
index b464519ba66776ba13ce2964070d19a2a4430bfb..98fd9c094cba3e779c9f203fdacc548a3bda5ef4 100644
--- a/src/util/src/tskiplist.c
+++ b/src/util/src/tskiplist.c
@@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
pSkipList->keyFn = fn;
pSkipList->seed = rand();
if (comparFn == NULL) {
- pSkipList->comparFn = getKeyComparFunc(keyType);
+ pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC);
} else {
pSkipList->comparFn = comparFn;
}
diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c
index 77941cba82010a9187227b4740c4100680577403..8d69a87e77bff594e7a99b2a63d4d849214eebe9 100644
--- a/src/util/src/tsocket.c
+++ b/src/util/src/tsocket.c
@@ -488,7 +488,7 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
return -1;
}
- if (listen(sockFd, 10) < 0) {
+ if (listen(sockFd, 1024) < 0) {
uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno));
taosCloseSocket(sockFd);
return -1;
diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c
new file mode 100644
index 0000000000000000000000000000000000000000..043b2de2f241297d209041294428dde2c55e974e
--- /dev/null
+++ b/src/util/src/tthread.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "os.h"
+#include "tthread.h"
+#include "tglobal.h"
+#include "taosdef.h"
+#include "tutil.h"
+#include "tulog.h"
+#include "taoserror.h"
+
+// create new thread
+pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) {
+  pthread_t* pthread = (pthread_t*)malloc(sizeof(pthread_t));
+  if (pthread == NULL) return NULL;
+ pthread_attr_t thattr;
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ int32_t ret = pthread_create(pthread, &thattr, __start_routine, param);
+ pthread_attr_destroy(&thattr);
+
+ if (ret != 0) {
+ free(pthread);
+ return NULL;
+ }
+ return pthread;
+}
+
+// destroy thread (cancels and joins it if still running)
+bool taosDestoryThread(pthread_t* pthread) {
+ if(pthread == NULL) return false;
+ if(taosThreadRunning(pthread)) {
+ pthread_cancel(*pthread);
+ pthread_join(*pthread, NULL);
+ }
+
+ free(pthread);
+ return true;
+}
+
+// returns true while the thread is still running
+bool taosThreadRunning(pthread_t* pthread) {
+ if(pthread == NULL) return false;
+ int ret = pthread_kill(*pthread, 0);
+ if(ret == ESRCH)
+ return false;
+ if(ret == EINVAL)
+ return false;
+ // alive
+ return true;
+}
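A short usage sketch for the new helpers; the worker body is illustrative, and sleep() is a cancellation point, which is what lets the pthread_cancel inside taosDestoryThread take effect:

    #include <stdio.h>
    #include <unistd.h>
    #include "tthread.h"

    static void* worker(void* param) {
      for (;;) {
        printf("tick: %s\n", (const char*)param);
        sleep(1);  // cancellation point
      }
      return NULL;
    }

    int main(void) {
      pthread_t* pThread = taosCreateThread(worker, "demo");
      if (pThread == NULL) return 1;
      sleep(3);
      taosDestoryThread(pThread);  // cancels, joins, frees the handle
      return 0;
    }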
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index c4d05b2d5a851ee4c2a8232095dd6ea5567213ab..289c4a6ef5d5db1a04fdb33985ed4de959375f8d 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -53,6 +53,7 @@ static SKeyword keywordTable[] = {
{"NOTNULL", TK_NOTNULL},
{"IS", TK_IS},
{"LIKE", TK_LIKE},
{"GLOB", TK_GLOB},
{"BETWEEN", TK_BETWEEN},
{"IN", TK_IN},
@@ -137,6 +138,7 @@ static SKeyword keywordTable[] = {
{"COMMA", TK_COMMA},
{"NULL", TK_NULL},
{"SELECT", TK_SELECT},
+ {"EVERY", TK_EVERY},
{"FROM", TK_FROM},
{"VARIABLE", TK_VARIABLE},
{"INTERVAL", TK_INTERVAL},
@@ -193,6 +195,7 @@ static SKeyword keywordTable[] = {
{"INITIALLY", TK_INITIALLY},
{"INSTEAD", TK_INSTEAD},
{"MATCH", TK_MATCH},
+ {"NMATCH", TK_NMATCH},
{"KEY", TK_KEY},
{"OF", TK_OF},
{"RAISE", TK_RAISE},
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index 1a73991ade1ea4617fc4d3dab3904652ff46d691..5f8c92898fc5f0abc4c733c0558befd68ac3cac7 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -64,12 +64,15 @@ int32_t strRmquote(char *z, int32_t len){
int32_t j = 0;
for (uint32_t k = 1; k < len - 1; ++k) {
if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
+ if (z[k] == '\\' && z[k + 1] == '_') {
+        // keep the "\_" escape so '_' can later match itself in LIKE patterns
+ } else {
z[j] = z[k + 1];
-
- cnt++;
- j++;
- k++;
- continue;
+ cnt++;
+ j++;
+ k++;
+ continue;
+ }
}
z[j] = z[k];
@@ -162,6 +165,8 @@ char *strnchr(char *haystack, char needle, int32_t len, bool skipquote) {
return NULL;
}
+
+
char* strtolower(char *dst, const char *src) {
int esc = 0;
char quote = 0, *p = dst, c;
@@ -197,7 +202,7 @@ char* strntolower(char *dst, const char *src, int32_t n) {
if (n == 0) {
*p = 0;
return dst;
- }
+ }
for (c = *src++; n-- > 0; c = *src++) {
if (esc) {
esc = 0;
@@ -219,6 +224,26 @@ char* strntolower(char *dst, const char *src, int32_t n) {
return dst;
}
+char* strntolower_s(char *dst, const char *src, int32_t n) {
+ char *p = dst, c;
+
+ assert(dst != NULL);
+ if (n == 0) {
+ return NULL;
+ }
+
+ while (n-- > 0) {
+ c = *src;
+ if (c >= 'A' && c <= 'Z') {
+ c -= 'A' - 'a';
+ }
+ *p++ = c;
+ src++;
+ }
+
+ return dst;
+}
+
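Unlike strntolower above, the _s variant is quote- and escape-unaware: it lowercases exactly n raw bytes. A quick sketch of the difference, with the prototype repeated so the snippet is self-contained:

    #include <stdio.h>
    #include <stdint.h>

    char* strntolower_s(char *dst, const char *src, int32_t n);  // defined in tutil.c

    int main(void) {
      char buf[32] = {0};
      strntolower_s(buf, "SELECT 'ABC'", 12);
      printf("%s\n", buf);  // select 'abc' -- the quoted text is lowercased too
      return 0;
    }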
char *paGetToken(char *string, char **token, int32_t *tokenLen) {
char quote = 0;
diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp
index dfbe0f67167ad12cedc0239fc310614ef747b080..df4c5af5e2ab62efab287f3dd00650fc29805c98 100644
--- a/src/util/tests/skiplistTest.cpp
+++ b/src/util/tests/skiplistTest.cpp
@@ -70,7 +70,7 @@ void doubleSkipListTest() {
}
void randKeyTest() {
- SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT),
+ SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
false, getkey);
int32_t size = 200000;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index f826c1aecd336a0eedeb3f02df0a7acc61895bb2..c823880ae2028c4bcfe26dbfc5cd60af62443722 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -560,5 +560,10 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) {
return vnodeSaveVersion(pVnode);
}
+ // timer thread callback
+ if(status == TSDB_STATUS_COMMIT_NOBLOCK) {
+ qSolveCommitNoBlock(pVnode->tsdb, pVnode->qMgmt);
+ }
+
return 0;
}
diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c
index 47749987994a3af3234b0149d00f5b89c75700ff..e991bf02aa68c92d7cf4dfdb09982ebaa6541bdc 100644
--- a/src/wal/src/walWrite.c
+++ b/src/wal/src/walWrite.c
@@ -17,6 +17,7 @@
#define TAOS_RANDOM_FILE_FAIL_TEST
#include "os.h"
#include "taoserror.h"
+#include "taosmsg.h"
#include "tchecksum.h"
#include "tfile.h"
#include "twal.h"
@@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) {
#if defined(WAL_CHECKSUM_WHOLE)
static void walUpdateChecksum(SWalHead *pHead) {
- pHead->sver = 1;
+ pHead->sver = 2;
pHead->cksum = 0;
pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len);
}
@@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) {
static int walValidateChecksum(SWalHead *pHead) {
if (pHead->sver == 0) { // for compatible with wal before sver 1
return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead));
- } else if (pHead->sver == 1) {
+ } else if (pHead->sver >= 1) {
uint32_t cksum = pHead->cksum;
pHead->cksum = 0;
return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum);
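Both paths rely on the same in-place protocol: zero the cksum field, checksum the whole record, then store (or compare) the result. A standalone sketch with a stand-in checksum and an illustrative record layout, not the real SWalHead:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef struct {
      int8_t   sver;    // record layout version (2 after this change)
      uint32_t cksum;   // covers the whole record with this field zeroed
      int32_t  len;
      char     cont[32];
    } DemoHead;         // illustrative layout only

    // Stand-in for taosCalcChecksum: any whole-buffer hash demonstrates
    // the zero-then-verify protocol (FNV-1a here).
    static uint32_t demoChecksum(const uint8_t *buf, size_t n) {
      uint32_t h = 2166136261u;
      for (size_t i = 0; i < n; ++i) { h ^= buf[i]; h *= 16777619u; }
      return h;
    }

    int main(void) {
      DemoHead rec = {.sver = 2, .len = 5};
      memcpy(rec.cont, "hello", 5);

      rec.cksum = 0;                      // zero before summing ...
      rec.cksum = demoChecksum((uint8_t*)&rec, sizeof(rec));

      uint32_t stored = rec.cksum;        // ... and again before verifying
      rec.cksum = 0;
      printf("valid: %d\n", demoChecksum((uint8_t*)&rec, sizeof(rec)) == stored);
      return 0;
    }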
@@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_SUCCESS;
}
- if (pHead->sver == 1) {
+ if (pHead->sver >= 1) {
if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) {
wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos);
return TSDB_CODE_WAL_FILE_CORRUPTED;
@@ -306,7 +307,115 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd,
return TSDB_CODE_WAL_FILE_CORRUPTED;
}
+// Add SMemRowType ahead of SDataRow
+static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) {
+  // copy the header first
+ memcpy(pDest, pSrc, sizeof(SSubmitBlk));
+ int32_t nRows = htons(pDest->numOfRows);
+ int32_t dataLen = htonl(pDest->dataLen);
+
+ if ((nRows <= 0) || (dataLen <= 0)) {
+ return;
+ }
+
+ char *pDestData = pDest->data;
+ char *pSrcData = pSrc->data;
+ for (int32_t i = 0; i < nRows; ++i) {
+ memRowSetType(pDestData, SMEM_ROW_DATA);
+ memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData));
+ pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData));
+ pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData));
+    ++(*lenExpand);  // one extra SMemRowType byte per row
+ }
+ pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t));
+}
+
+// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen
+static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) {
+ if ((nRows <= 0) || (dataLen <= 0)) {
+ return true;
+ }
+ int32_t len = 0, kvLen = 0;
+ for (int i = 0; i < nRows; ++i) {
+ len += dataRowLen(pBlkData);
+ if (len > dataLen) {
+ return false;
+ }
+
+ /**
+ * For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict.
+ * For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario
+ * - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check
+ */
+ if (dataRowLen(pBlkData) == 257) {
+ SMemRow memRow = pBlkData;
+ SKVRow kvRow = memRowKvBody(memRow);
+ int nCols = kvRowNCols(kvRow);
+ uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols);
+ uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset;
+ if (calcTsOffset == realTsOffset) {
+ kvLen += memRowKvTLen(memRow);
+ }
+ }
+ pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData));
+ }
+ if (len != dataLen) {
+ return false;
+ }
+ if (kvLen == dataLen) {
+ return false;
+ }
+ return true;
+}
+// for WAL SMemRow/SDataRow compatibility
+static int walSMemRowCheck(SWalHead *pHead) {
+ if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) {
+ SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont;
+ int32_t numOfBlocks = htonl(pMsg->numOfBlocks);
+ if (numOfBlocks <= 0) {
+ return 0;
+ }
+
+ int32_t nTotalRows = 0;
+ SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks;
+ for (int32_t i = 0; i < numOfBlocks; ++i) {
+ int32_t dataLen = htonl(pBlk->dataLen);
+ int32_t nRows = htons(pBlk->numOfRows);
+ nTotalRows += nRows;
+ if (!walIsSDataRow(pBlk->data, nRows, dataLen)) {
+ return 0;
+ }
+ pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen);
+ }
+ ASSERT(nTotalRows >= 0);
+ SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1);
+ if (pWalHead == NULL) {
+ return -1;
+ }
+
+ memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg));
+
+ SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont;
+ SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks;
+ SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks;
+ int32_t lenExpand = 0;
+ for (int32_t i = 0; i < numOfBlocks; ++i) {
+ expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand);
+ pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk));
+ pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk));
+ }
+ if (lenExpand > 0) {
+ pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand;
+ pDestMsg->length = htonl(pDestMsg->header.contLen);
+ pWalHead->len = pWalHead->len + lenExpand;
+ }
+
+ memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len);
+ tfree(pWalHead);
+ }
+ return 0;
+}
static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) {
int32_t size = WAL_MAX_SIZE;
@@ -346,7 +455,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
}
#if defined(WAL_CHECKSUM_WHOLE)
- if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) {
+ if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) {
wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@@ -379,7 +488,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
continue;
}
- if (pHead->sver == 1 && !walValidateChecksum(pHead)) {
+ if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) {
wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
pHead->version, pHead->len, offset);
code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@@ -431,7 +540,14 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version;
- //wInfo("writeFp: %ld", offset);
+ // wInfo("writeFp: %ld", offset);
+ if (0 != walSMemRowCheck(pHead)) {
+ wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
+ pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);
+ tfClose(tfd);
+ tfree(buffer);
+ return TAOS_SYSTEM_ERROR(errno);
+ }
(*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL);
}
diff --git a/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs
new file mode 100644
index 0000000000000000000000000000000000000000..e6c3a598adc0bc4bcf5ea84953f649b418199555
--- /dev/null
+++ b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+
+namespace TDengineDriver
+{
+ enum TDengineDataType
+ {
+ TSDB_DATA_TYPE_NULL = 0, // 1 bytes
+ TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
+ TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
+ TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
+ TSDB_DATA_TYPE_INT = 4, // 4 bytes
+ TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
+ TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
+ TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
+ TSDB_DATA_TYPE_BINARY = 8, // string
+ TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
+ TSDB_DATA_TYPE_NCHAR = 10, // unicode string
+        TSDB_DATA_TYPE_UTINYINT = 11,  // 1 byte
+        TSDB_DATA_TYPE_USMALLINT = 12, // 2 bytes
+        TSDB_DATA_TYPE_UINT = 13,      // 4 bytes
+        TSDB_DATA_TYPE_UBIGINT = 14    // 8 bytes
+ }
+
+ enum TDengineInitOption
+ {
+ TSDB_OPTION_LOCALE = 0,
+ TSDB_OPTION_CHARSET = 1,
+ TSDB_OPTION_TIMEZONE = 2,
+ TDDB_OPTION_CONFIGDIR = 3,
+ TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
+ }
+
+ class TDengineMeta
+ {
+ public string name;
+ public short size;
+ public byte type;
+ public string TypeName()
+ {
+ switch ((TDengineDataType)type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+ return "BOOL";
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ return "TINYINT";
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ return "SMALLINT";
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ return "INT";
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ return "BIGINT";
+ case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
+ return "TINYINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
+ return "SMALLINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_UINT:
+ return "INT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
+ return "BIGINT UNSIGNED";
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ return "FLOAT";
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ return "DOUBLE";
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ return "STRING";
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ return "TIMESTAMP";
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ return "NCHAR";
+ default:
+                    return "undefined";
+ }
+ }
+ }
+
+ class TDengine
+ {
+ public const int TSDB_CODE_SUCCESS = 0;
+
+ [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Init();
+
+ [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Cleanup();
+
+ [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
+ static extern public void Options(int option, string value);
+
+ [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
+
+ [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
+ static extern private IntPtr taos_errstr(IntPtr res);
+ static public string Error(IntPtr res)
+ {
+ IntPtr errPtr = taos_errstr(res);
+ return Marshal.PtrToStringAnsi(errPtr);
+ }
+
+ [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ErrorNo(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr Query(IntPtr conn, string sqlstr);
+
+ [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int AffectRows(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int FieldCount(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
+ static extern private IntPtr taos_fetch_fields(IntPtr res);
+        static public List<TDengineMeta> FetchFields(IntPtr res)
+ {
+ const int fieldSize = 68;
+
+            List<TDengineMeta> metas = new List<TDengineMeta>();
+ if (res == IntPtr.Zero)
+ {
+ return metas;
+ }
+
+ int fieldCount = FieldCount(res);
+ IntPtr fieldsPtr = taos_fetch_fields(res);
+
+ for (int i = 0; i < fieldCount; ++i)
+ {
+ int offset = i * fieldSize;
+
+ TDengineMeta meta = new TDengineMeta();
+ meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
+ meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
+ meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
+ metas.Add(meta);
+ }
+
+ return metas;
+ }
+
+ [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr FetchRows(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
+ static extern public IntPtr FreeResult(IntPtr res);
+
+ [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int Close(IntPtr taos);
+        // get the precision of the result set
+ [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
+ static extern public int ResultPrecision(IntPtr taos);
+ }
+}
diff --git a/tests/connectorTest/C#Test/nanosupport/nanotest.cs b/tests/connectorTest/C#Test/nanosupport/nanotest.cs
new file mode 100644
index 0000000000000000000000000000000000000000..b9eaefef8c740f8196a715282c8c28ffd79bbdac
--- /dev/null
+++ b/tests/connectorTest/C#Test/nanosupport/nanotest.cs
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+using System;
+using System.Text;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Collections;
+namespace TDengineDriver
+{
+ class TDengineNanoTest
+ {
+ //connect parameters
+ private string host="localhost";
+ private string configDir="/etc/taos";
+ private string user="root";
+ private string password="taosdata";
+ private short port = 0;
+
+ //sql parameters
+ private string dbName;
+ private string tbName;
+ private string precision;
+
+ private bool isInsertData;
+ private bool isQueryData;
+
+ private long tableCount;
+ private long totalRows;
+ private long batchRows;
+ private long beginTimestamp = 1551369600000L;
+
+ private IntPtr conn = IntPtr.Zero;
+ private long rowsInserted = 0;
+
+ static void Main(string[] args)
+ {
+ TDengineNanoTest tester = new TDengineNanoTest();
+ //tester.ReadArgument(args);
+
+ tester.InitTDengine();
+ tester.ConnectTDengine();
+ tester.execute("reset query cache");
+ tester.execute("drop database if exists db");
+ tester.execute("create database db precision 'ns'");
+ tester.executeQuery("show databases;");
+ //tester.checkData(0,16,"ns");
+ tester.execute("use db");
+
+ Console.WriteLine("testing nanosecond support in 1st timestamp");
+ tester.execute("create table tb (ts timestamp, speed int)");
+ tester.execute("insert into tb values('2021-06-10 0:00:00.100000001', 1);");
+ tester.execute("insert into tb values(1623254400150000000, 2);");
+ tester.execute("import into tb values(1623254400300000000, 3);");
+ tester.execute("import into tb values(1623254400299999999, 4);");
+ tester.execute("insert into tb values(1623254400300000001, 5);");
+ tester.execute("insert into tb values(1623254400999999999, 7);");
+ tester.executeQuery("select * from tb;");
+
+ Console.WriteLine("expect data is ");
+
+ tester.executeQuery("select * from tb;");
+
+ // Console.WriteLine("expected is : {0}", width);
+ // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001");
+ // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000");
+ // tdSql.checkData(2,0,"2021-06-10 0:00:00.299999999");
+ // tdSql.checkData(3,1,3);
+ // tdSql.checkData(4,1,5);
+ // tdSql.checkData(5,1,7);
+ // tdSql.checkRows(6);
+
+ tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;");
+ Console.WriteLine("expected is : 1 " );
+ tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000001' and ts < '2021-06-10 0:00:00.160000000';");
+ Console.WriteLine("expected is : 1 " );
+
+ tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;");
+ Console.WriteLine("expected is : 1 " );
+ tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000000' and ts < '2021-06-10 0:00:00.150000000';");
+ Console.WriteLine("expected is : 1 " );
+
+ tester.executeQuery("select count(*) from tb where ts > 1623254400400000000;");
+ Console.WriteLine("expected is : 1 " );
+ tester.executeQuery("select count(*) from tb where ts < '2021-06-10 00:00:00.400000000';");
+ Console.WriteLine("expected is : 5 " );
+
+ tester.executeQuery("select count(*) from tb where ts > now + 400000000b;");
+ Console.WriteLine("expected is : 0 " );
+
+ tester.executeQuery("select count(*) from tb where ts >= '2021-06-10 0:00:00.100000001';");
+ Console.WriteLine("expected is : 6 " );
+
+ tester.executeQuery("select count(*) from tb where ts <= 1623254400300000000;");
+ Console.WriteLine("expected is : 4 " );
+
+ tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.000000000';");
+ Console.WriteLine("expected is : 0 " );
+
+ tester.executeQuery("select count(*) from tb where ts = 1623254400150000000;");
+ Console.WriteLine("expected is : 1 " );
+
+ tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.100000001';");
+ Console.WriteLine("expected is : 1 " );
+
+ tester.executeQuery("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;");
+ Console.WriteLine("expected is : 5 " );
+
+ tester.executeQuery("select count(*) from tb where ts between '2021-06-10 0:00:00.299999999' and '2021-06-10 0:00:00.300000001';");
+ Console.WriteLine("expected is : 3 " );
+
+ tester.executeQuery("select avg(speed) from tb interval(5000000000b);");
+ Console.WriteLine("expected is : 1 " );
+
+ tester.executeQuery("select avg(speed) from tb interval(100000000b)");
+ Console.WriteLine("expected is : 4 " );
+
+ // tdSql.error("select avg(speed) from tb interval(1b);")
+ // tdSql.error("select avg(speed) from tb interval(999b);")
+
+ tester.executeQuery("select avg(speed) from tb interval(1000b);");
+ Console.WriteLine("expected is : 5 rows " );
+
+ tester.executeQuery("select avg(speed) from tb interval(1u);");
+ Console.WriteLine("expected is : 5 rows " );
+
+ tester.executeQuery("select avg(speed) from tb interval(100000000b) sliding (100000000b);");
+ Console.WriteLine("expected is : 4 rows " );
+
+ tester.executeQuery("select last(*) from tb");
+ Console.WriteLine("expected is :1623254400999999999 " );
+
+ // tdSql.checkData(0,0, "2021-06-10 0:00:00.999999999")
+ // tdSql.checkData(0,0, 1623254400999999999)
+
+ tester.executeQuery("select first(*) from tb");
+ Console.WriteLine("expected is : 1623254400100000001" );
+ // tdSql.checkData(0,0, 1623254400100000001);
+ // tdSql.checkData(0,0, "2021-06-10 0:00:00.100000001");
+
+ tester.execute("insert into tb values(now + 500000000b, 6);");
+ tester.executeQuery("select * from tb;");
+ // tdSql.checkRows(7);
+
+ tester.execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);");
+ tester.execute("insert into tb2 values('2021-06-10 0:00:00.100000001', 1, '2021-06-11 0:00:00.100000001');");
+ tester.execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);");
+ tester.execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);");
+ tester.execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);");
+ tester.execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);");
+ tester.execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);");
+
+ tester.executeQuery("select * from tb2;");
+ // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001");
+ // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000");
+ // tdSql.checkData(2,1,4);
+ // tdSql.checkData(3,1,3);
+ // tdSql.checkData(4,2,"2021-06-11 00:00:00.300000001");
+ // tdSql.checkData(5,2,"2021-06-13 00:00:00.999999999");
+ // tdSql.checkRows(6);
+ tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;");
+ Console.WriteLine("expected is : 1 " );
+ // tdSql.checkData(0,0,1);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 > '2021-06-11 0:00:00.100000000' and ts2 < '2021-06-11 0:00:00.100000002';");
+ Console.WriteLine("expected is : 1 " );
+ // tdSql.checkData(0,0,1);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800500000000;");
+ Console.WriteLine("expected is : 1 " );
+ // tdSql.checkData(0,0,1);
+ tester.executeQuery("select count(*) from tb2 where ts2 < '2021-06-11 0:00:00.400000000';");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 > now + 400000000b;");
+ Console.WriteLine("expected is : 0 " );
+ // tdSql.checkRows(0);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 >= '2021-06-11 0:00:00.100000001';");
+ Console.WriteLine("expected is : 6 " );
+ // tdSql.checkData(0,0,6);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 <= 1623340800400000000;");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.000000000';");
+ Console.WriteLine("expected is : 0 " );
+ // tdSql.checkRows(0);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.300000001';");
+ Console.WriteLine("expected is : 1 " );
+ // tdSql.checkData(0,0,1);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 = 1623340800300000001;");
+ Console.WriteLine("expected is : 1 " );
+ // tdSql.checkData(0,0,1);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 between '2021-06-11 0:00:00.299999999' and '2021-06-11 0:00:00.300000001';");
+ Console.WriteLine("expected is : 3 " );
+ // tdSql.checkData(0,0,3);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 <> 1623513600999999999;");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000001';");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000000';");
+ Console.WriteLine("expected is : 6 " );
+ // tdSql.checkData(0,0,6);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 != 1623513600999999999;");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000001';");
+ Console.WriteLine("expected is : 5 " );
+ // tdSql.checkData(0,0,5);
+
+ tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000000';");
+ Console.WriteLine("expected is : 6 " );
+ // tdSql.checkData(0,0,6);
+
+ tester.execute("insert into tb2 values(now + 500000000b, 6, now +2d);");
+ tester.executeQuery("select * from tb2;");
+ Console.WriteLine("expected is : 7 rows" );
+ // tdSql.checkRows(7);
+
+            // tdLog.debug("testing ill-formed nanosecond timestamp handling");
+ tester.execute("create table tb3 (ts timestamp, speed int);");
+ // tdSql.error("insert into tb3 values(16232544001500000, 2);");
+ tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456', 2);");
+ tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456000';");
+ // tdSql.checkRows(1);
+ Console.WriteLine("expected is : 1 rows " );
+
+ tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456789000', 2);");
+ tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456789';");
+ // tdSql.checkRows(1);
+ Console.WriteLine("expected is : 1 rows " );
+
+ // check timezone support
+ Console.WriteLine("nsdb" );
+ tester.execute("drop database if exists nsdb;");
+ tester.execute("create database nsdb precision 'ns';");
+ tester.execute("use nsdb;");
+ tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);");
+ tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);" );
+ tester.executeQuery("select first(*) from tb1;");
+ Console.WriteLine("expected is : 1623258000123456789 " );
+ // tdSql.checkData(0,0,1623258000123456789);
+
+
+
+ Console.WriteLine("usdb" );
+ tester.execute("create database usdb precision 'us';");
+ tester.execute("use usdb;");
+ tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);");
+ tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);" );
+ tester.executeQuery("select first(*) from tb1;");
+
+ Console.WriteLine("expected is : 1623258000123456 " );
+
+ Console.WriteLine("msdb" );
+ tester.execute("drop database if exists msdb;");
+ tester.execute("create database msdb precision 'ms';");
+ tester.execute("use msdb;");
+ tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);");
+ tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);" );
+ tester.executeQuery("select first(*) from tb1;");
+ Console.WriteLine("expected is : 1623258000123 " );
+
+
+
+ tester.CloseConnection();
+ tester.cleanup();
+
+
+ }
+
+ public void InitTDengine()
+ {
+ TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir);
+ TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60");
+ Console.WriteLine("init...");
+ TDengine.Init();
+ Console.WriteLine("get connection starting...");
+ }
+
+ public void ConnectTDengine()
+ {
+ string db = "";
+ this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port);
+ if (this.conn == IntPtr.Zero)
+ {
+ Console.WriteLine("connection failed: " + this.host);
+ ExitProgram();
+ }
+ else
+ {
+ Console.WriteLine("[ OK ] Connection established.");
+ }
+ }
+ //EXECUTE SQL
+ public void execute(string sql)
+ {
+ DateTime dt1 = DateTime.Now;
+ IntPtr res = TDengine.Query(this.conn, sql.ToString());
+ DateTime dt2 = DateTime.Now;
+ TimeSpan span = dt2 - dt1;
+
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ if (res != IntPtr.Zero) {
+ Console.Write("reason: " + TDengine.Error(res));
+ }
+ Console.WriteLine("");
+ ExitProgram();
+ }
+ else
+ {
+ Console.WriteLine(sql.ToString() + " success");
+ }
+ TDengine.FreeResult(res);
+ }
+ //EXECUTE QUERY
+ public void executeQuery(string sql)
+ {
+
+ DateTime dt1 = DateTime.Now;
+ long queryRows = 0;
+ IntPtr res = TDengine.Query(conn, sql);
+ getPrecision(res);
+ if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
+ {
+ Console.Write(sql.ToString() + " failure, ");
+ if (res != IntPtr.Zero) {
+ Console.Write("reason: " + TDengine.Error(res));
+ }
+ Console.WriteLine("");
+ ExitProgram();
+ }
+ DateTime dt2 = DateTime.Now;
+ TimeSpan span = dt2 - dt1;
+ Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString());
+ int fieldCount = TDengine.FieldCount(res);
+
+            List<TDengineMeta> metas = TDengine.FetchFields(res);
+ for (int j = 0; j < metas.Count; j++)
+ {
+ TDengineMeta meta = (TDengineMeta)metas[j];
+ }
+
+ IntPtr rowdata;
+ StringBuilder builder = new StringBuilder();
+ while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
+ {
+ queryRows++;
+ for (int fields = 0; fields < fieldCount; ++fields)
+ {
+ TDengineMeta meta = metas[fields];
+ int offset = IntPtr.Size * fields;
+ IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
+
+ builder.Append("---");
+
+ if (data == IntPtr.Zero)
+ {
+ builder.Append("NULL");
+ continue;
+ }
+
+ switch ((TDengineDataType)meta.type)
+ {
+ case TDengineDataType.TSDB_DATA_TYPE_BOOL:
+                            bool v1 = Marshal.ReadByte(data) != 0;
+ builder.Append(v1);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
+ byte v2 = Marshal.ReadByte(data);
+ builder.Append(v2);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
+ short v3 = Marshal.ReadInt16(data);
+ builder.Append(v3);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_INT:
+ int v4 = Marshal.ReadInt32(data);
+ builder.Append(v4);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
+ long v5 = Marshal.ReadInt64(data);
+ builder.Append(v5);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
+ float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
+ builder.Append(v6);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
+ double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
+ builder.Append(v7);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_BINARY:
+ string v8 = Marshal.PtrToStringAnsi(data);
+ builder.Append(v8);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
+ long v9 = Marshal.ReadInt64(data);
+ builder.Append(v9);
+ break;
+ case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
+ string v10 = Marshal.PtrToStringAnsi(data);
+ builder.Append(v10);
+ break;
+ }
+ }
+ builder.Append("---");
+
+ if (queryRows <= 10)
+ {
+ Console.WriteLine(builder.ToString());
+ }
+ builder.Clear();
+ }
+
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
+ }
+ Console.WriteLine("");
+
+ TDengine.FreeResult(res);
+
+ }
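+        // Note on the row layout above (inferred from the Marshal calls, not
+        // from connector documentation): TDengine.FetchRows returns a TAOS_ROW,
+        // i.e. an array of per-column data pointers, so each cell is read with
+        //   IntPtr data = Marshal.ReadIntPtr(rowdata, IntPtr.Size * column);
+        // and then decoded according to the column's TDengineDataType.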
+
+ public void CloseConnection()
+ {
+ if (this.conn != IntPtr.Zero)
+ {
+ TDengine.Close(this.conn);
+ Console.WriteLine("connection closed.");
+ }
+ }
+
+        static void ExitProgram()
+        {
+            // exit non-zero: ExitProgram is only reached on failure paths
+            System.Environment.Exit(1);
+        }
+
+ public void cleanup()
+ {
+ Console.WriteLine("clean up...");
+ System.Environment.Exit(0);
+ }
+
+ // method to get db precision
+ public void getPrecision(IntPtr res)
+ {
+ int psc=TDengine.ResultPrecision(res);
+ switch(psc)
+ {
+ case 0:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond");
+ break;
+ case 1:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond");
+ break;
+ case 2:
+ Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond");
+ break;
+ }
+
+ }
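+
+        // Illustrative sketch (not part of the original test): normalizing a raw
+        // epoch value to nanoseconds from the precision code returned by
+        // TDengine.ResultPrecision. Kept commented out so test behavior is unchanged.
+        // static long ToNanoseconds(long raw, int psc)
+        // {
+        //     switch (psc)
+        //     {
+        //         case 0: return raw * 1000000L; // millisecond input
+        //         case 1: return raw * 1000L;    // microsecond input
+        //         default: return raw;           // nanosecond input
+        //     }
+        // }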
+
+ // public void checkData(int x ,int y , long ts ){
+
+ // }
+
+ }
+}
+
diff --git a/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js
new file mode 100644
index 0000000000000000000000000000000000000000..11812ac84b91d5c639a3b3bd73c8b81838c5cc23
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js
@@ -0,0 +1,290 @@
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"localhost", user:"root", password:"taosdata", config:"/etc/taos",port:6030})
+var c1 = conn.cursor();
+
+
+function checkData(sql, row, col, data) {
+    console.log(sql)
+    c1.execute(sql)
+    var d = c1.fetchall();
+    let checkdata = d[row][col];
+    if (checkdata == data) {
+        console.log('check pass')
+    }
+    else {
+        console.log('check failed')
+        console.log('checked is :', checkdata)
+        console.log('expected is :', data)
+    }
+}
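+// Example (mirrors the checks below): compare cell (row, col) of the query
+// result against an expected rendered value, e.g.
+//   checkData('select * from tb;', 0, 0, '2021-06-10 00:00:00.100000001')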
+
+
+// nano basic case
+
+c1.execute('reset query cache')
+c1.execute('drop database if exists db')
+c1.execute('create database db precision "ns";')
+c1.execute('use db');
+c1.execute('create table tb (ts timestamp, speed int)')
+c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);')
+c1.execute('insert into tb values(1623254400150000000, 2);')
+c1.execute('import into tb values(1623254400300000000, 3);')
+c1.execute('import into tb values(1623254400299999999, 4);')
+c1.execute('insert into tb values(1623254400300000001, 5);')
+c1.execute('insert into tb values(1623254400999999999, 7);')
+c1.execute('insert into tb values(1623254400123456789, 8);')
+sql = 'select * from tb;'
+
+console.log('*******************************************')
+console.log('checking inserted data (ns precision)')
+// check the inserted rows
+checkData(sql,0,0,'2021-06-10 00:00:00.100000001')
+checkData(sql,1,0,'2021-06-10 00:00:00.123456789')
+checkData(sql,2,0,'2021-06-10 00:00:00.150000000')
+checkData(sql,3,0,'2021-06-10 00:00:00.299999999')
+checkData(sql,4,0,'2021-06-10 00:00:00.300000000')
+checkData(sql,5,0,'2021-06-10 00:00:00.300000001')
+checkData(sql,6,0,'2021-06-10 00:00:00.999999999')
+checkData(sql,0,1,1)
+checkData(sql,1,1,8)
+checkData(sql,2,1,2)
+checkData(sql,5,1,5)
+
+
+
+// us basic case
+
+c1.execute('reset query cache')
+c1.execute('drop database if exists usdb')
+c1.execute('create database usdb precision "us";')
+c1.execute('use usdb');
+c1.execute('create table tb (ts timestamp, speed int)')
+c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);')
+c1.execute('insert into tb values(1623254400150000, 2);')
+c1.execute('import into tb values(1623254400300000, 3);')
+c1.execute('import into tb values(1623254400299999, 4);')
+c1.execute('insert into tb values(1623254400300001, 5);')
+c1.execute('insert into tb values(1623254400999999, 7);')
+c1.execute('insert into tb values(1623254400123789, 8);')
+sql = 'select * from tb;'
+
+console.log('*******************************************')
+
+// check the inserted rows (us precision)
+checkData(sql,0,0,'2021-06-10 00:00:00.100001')
+checkData(sql,1,0,'2021-06-10 00:00:00.123789')
+checkData(sql,2,0,'2021-06-10 00:00:00.150000')
+checkData(sql,3,0,'2021-06-10 00:00:00.299999')
+checkData(sql,4,0,'2021-06-10 00:00:00.300000')
+checkData(sql,5,0,'2021-06-10 00:00:00.300001')
+checkData(sql,6,0,'2021-06-10 00:00:00.999999')
+checkData(sql,0,1,1)
+checkData(sql,1,1,8)
+checkData(sql,2,1,2)
+checkData(sql,5,1,5)
+
+console.log('*******************************************')
+
+// ms basic case
+
+c1.execute('reset query cache')
+c1.execute('drop database if exists msdb')
+c1.execute('create database msdb precision "ms";')
+c1.execute('use msdb');
+c1.execute('create table tb (ts timestamp, speed int)')
+c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);')
+c1.execute('insert into tb values(1623254400150, 2);')
+c1.execute('import into tb values(1623254400300, 3);')
+c1.execute('import into tb values(1623254400299, 4);')
+c1.execute('insert into tb values(1623254400301, 5);')
+c1.execute('insert into tb values(1623254400789, 7);')
+c1.execute('insert into tb values(1623254400999, 8);')
+sql = 'select * from tb;'
+
+console.log('*******************************************')
+console.log('checking inserted data (ms precision)')
+// check the inserted rows
+checkData(sql,0,0,'2021-06-10 00:00:00.101')
+checkData(sql,1,0,'2021-06-10 00:00:00.150')
+checkData(sql,2,0,'2021-06-10 00:00:00.299')
+checkData(sql,3,0,'2021-06-10 00:00:00.300')
+checkData(sql,4,0,'2021-06-10 00:00:00.301')
+checkData(sql,5,0,'2021-06-10 00:00:00.789')
+checkData(sql,6,0,'2021-06-10 00:00:00.999')
+checkData(sql,0,1,1)
+checkData(sql,1,1,2)
+checkData(sql,2,1,4)
+checkData(sql,5,1,7)
+
+console.log('*******************************************')
+
+// official pretty-printed query result (kept for reference)
+// console.log('this is area about fetch all data')
+// var query = c1.query(sql)
+// var promise = query.execute();
+// promise.then(function(result) {
+// result.pretty();
+// });
+
+console.log('*******************************************')
+c1.execute('use db')
+
+sql2 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;'
+checkData(sql2,0,0,1)
+
+sql3 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';'
+checkData(sql3,0,0,2)
+
+sql4 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;'
+checkData(sql4,0,0,2)
+
+sql5 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';'
+checkData(sql5,0,0,2)
+
+sql6 = 'select count(*) from tb where ts > 1623254400400000000;'
+checkData(sql6,0,0,1)
+
+sql7 = 'select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';'
+checkData(sql7,0,0,6)
+
+sql8 = 'select count(*) from tb where ts > now + 400000000b;'
+c1.execute(sql8)
+
+sql9 = 'select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';'
+checkData(sql9,0,0,7)
+
+sql10 = 'select count(*) from tb where ts <= 1623254400300000000;'
+checkData(sql10,0,0,5)
+
+sql11 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';'
+c1.execute(sql11)
+
+sql12 = 'select count(*) from tb where ts = 1623254400150000000;'
+checkData(sql12,0,0,1)
+
+sql13 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';'
+checkData(sql13,0,0,1)
+
+sql14 = 'select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;'
+checkData(sql14,0,0,6)
+
+sql15 = 'select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';'
+checkData(sql15,0,0,3)
+
+sql16 = 'select avg(speed) from tb interval(5000000000b);'
+checkData(sql16,0,0,'2021-06-10 00:00:00.000000000')
+
+sql17 = 'select avg(speed) from tb interval(100000000b)'
+checkData(sql17,0,1,3.6666666666666665)
+checkData(sql17,1,1,4.000000000)
+
+checkData(sql17,2,0,'2021-06-10 00:00:00.300000000')
+checkData(sql17,3,0,'2021-06-10 00:00:00.900000000')
+
+console.log("print break ")
+
+// sql18 = 'select avg(speed) from tb interval(999b)'
+// c1.execute(sql18)
+
+console.log("print break2 ")
+sql19 = 'select avg(speed) from tb interval(1u);'
+checkData(sql19,2,1,2.000000000)
+checkData(sql19,3,0,'2021-06-10 00:00:00.299999000')
+
+sql20 = 'select avg(speed) from tb interval(100000000b) sliding (100000000b);'
+checkData(sql20,2,1,4.000000000)
+checkData(sql20,3,0,'2021-06-10 00:00:00.900000000')
+
+sql21 = 'select last(*) from tb;'
+checkData(sql21,0,0,'2021-06-10 00:00:00.999999999')
+
+sql22 = 'select first(*) from tb;'
+checkData(sql22,0,0,'2021-06-10 00:00:00.100000001')
+
+// timezone support
+
+console.log('testing nanosecond support in additional timestamp columns')
+
+c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);')
+c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');')
+c1.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);')
+c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);')
+c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);')
+c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);')
+c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);')
+
+sql23 = 'select * from tb2;'
+checkData(sql23,0,0,'2021-06-10 00:00:00.100000001')
+checkData(sql23,1,0,'2021-06-10 00:00:00.150000000')
+checkData(sql23,2,1,4)
+checkData(sql23,3,1,3)
+checkData(sql23,4,2,'2021-06-11 00:00:00.300000001')
+checkData(sql23,5,2,'2021-06-13 00:00:00.999999999')
+
+sql24 = 'select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';'
+checkData(sql24,0,0,6)
+
+sql25 = 'select count(*) from tb2 where ts2 <= 1623340800400000000;'
+checkData(sql25,0,0,5)
+
+sql26 = 'select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';'
+checkData(sql26,0,0,1)
+
+sql27 = 'select count(*) from tb2 where ts2 = 1623340800300000001;'
+checkData(sql27,0,0,1)
+
+sql28 = 'select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;'
+checkData(sql28,0,0,5)
+
+sql29 = 'select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';'
+checkData(sql29,0,0,3)
+
+sql30 = 'select count(*) from tb2 where ts2 <> 1623513600999999999;'
+checkData(sql30,0,0,5)
+
+sql31 = 'select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';'
+checkData(sql31,0,0,5)
+
+sql32 = 'select count(*) from tb2 where ts2 != 1623513600999999999;'
+checkData(sql32,0,0,5)
+
+sql33 = 'select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';'
+checkData(sql33,0,0,5)
+
+c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);')
+
+sql34 = 'select count(*) from tb2;'
+checkData(sql34,0,0,7)
+
+
+// check timezone support
+
+c1.execute('use db;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+07:00" , 1.0);' )
+sql35 = 'select first(*) from stb1;'
+checkData(sql35,0,0,'2021-06-10 01:00:00.123456789')
+
+c1.execute('use usdb;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' )
+sql36 = 'select first(*) from stb1;'
+checkData(sql36,0,0,'2021-06-10 01:00:00.123456')
+
+c1.execute('use msdb;')
+c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);')
+c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' )
+sql37 = 'select first(*) from stb1;'
+checkData(sql37,0,0,'2021-06-10 01:00:00.123')
diff --git a/tests/connectorTest/nodejsTest/nodetaos/cinterface.js b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js
new file mode 100644
index 0000000000000000000000000000000000000000..03d27e5593ccb15d8ff47cd3c3dedba765d14fc1
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js
@@ -0,0 +1,587 @@
+/**
+ * C Interface with TDengine Module
+ * @module CTaosInterface
+ */
+
+const ref = require('ref-napi');
+const os = require('os');
+const ffi = require('ffi-napi');
+const ArrayType = require('ref-array-napi');
+const Struct = require('ref-struct-napi');
+const FieldTypes = require('./constants');
+const errors = require('./error');
+const TaosObjects = require('./taosobjects');
+const { NULL_POINTER } = require('ref-napi');
+
+module.exports = CTaosInterface;
+
+function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let time = data.readInt64LE(currOffset);
+ currOffset += nbytes;
+ res.push(new TaosObjects.TaosTimestamp(time, precision));
+ }
+ return res;
+}
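+// Note: precision follows taos_result_precision(): 0 = millisecond,
+// 1 = microsecond, 2 = nanosecond (the same mapping used by the C#
+// getPrecision helper in this patch).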
+function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = new Array(data.length);
+ for (let i = 0; i < data.length; i++) {
+ if (data[i] == 0) {
+ res[i] = false;
+ }
+ else if (data[i] == 1) {
+ res[i] = true;
+ }
+ else if (data[i] == FieldTypes.C_BOOL_NULL) {
+ res[i] = null;
+ }
+ }
+ return res;
+}
+function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readIntLE(currOffset, 1);
+ res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readIntLE(currOffset, 2);
+ res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readInt32LE(currOffset);
+ res.push(d == FieldTypes.C_INT_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readInt64LE(currOffset);
+ res.push(d == FieldTypes.C_BIGINT_NULL ? null : BigInt(d));
+ currOffset += nbytes;
+ }
+ return res;
+}
+function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = parseFloat(data.readFloatLE(currOffset).toFixed(5));
+ res.push(isNaN(d) ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = parseFloat(data.readDoubleLE(currOffset).toFixed(16));
+ res.push(isNaN(d) ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+
+function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let len = data.readIntLE(currOffset, 2);
+ let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column;
+ res.push(dataEntry.toString("utf-8"));
+ currOffset += nbytes;
+ }
+ return res;
+}
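+// Hedged sketch of the cell layout convertNchar assumes: each variable-length
+// cell is [int16 length][payload] inside a fixed nbytes-wide slot, e.g.
+//   const cell = Buffer.alloc(nbytes);
+//   cell.writeInt16LE(2, 0);        // payload length
+//   cell.write('ab', 2, 'utf-8');   // payload bytes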
+
+// Object with all the relevant converters from pblock data to javascript readable data
+let convertFunctions = {
+ [FieldTypes.C_BOOL]: convertBool,
+ [FieldTypes.C_TINYINT]: convertTinyint,
+ [FieldTypes.C_SMALLINT]: convertSmallint,
+ [FieldTypes.C_INT]: convertInt,
+ [FieldTypes.C_BIGINT]: convertBigint,
+ [FieldTypes.C_FLOAT]: convertFloat,
+ [FieldTypes.C_DOUBLE]: convertDouble,
+ [FieldTypes.C_BINARY]: convertNchar,
+ [FieldTypes.C_TIMESTAMP]: convertTimestamp,
+ [FieldTypes.C_NCHAR]: convertNchar
+}
+
+// Define TaosField structure
+var char_arr = ArrayType(ref.types.char);
+var TaosField = Struct({
+ 'name': char_arr,
+});
+TaosField.fields.name.type.size = 65;
+TaosField.defineProperty('type', ref.types.char);
+TaosField.defineProperty('bytes', ref.types.short);
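+// The struct above is 68 bytes per field (65-byte name + 1-byte type +
+// 2-byte bytes), which is why the field readers below walk the returned
+// buffer in strides of 68.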
+
+
+/**
+ *
+ * @param {Object} config - Configuration options for the interface
+ * @return {CTaosInterface}
+ * @class CTaosInterface
+ * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to
+ * access this class directly unless you understand what these functions do.
+ */
+function CTaosInterface(config = null, pass = false) {
+ ref.types.char_ptr = ref.refType(ref.types.char);
+ ref.types.void_ptr = ref.refType(ref.types.void);
+ ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
+ /*Declare a bunch of functions first*/
+ /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */
+
+  let taoslibname;
+  if ('win32' == os.platform()) {
+    taoslibname = 'taos';
+  } else {
+    taoslibname = 'libtaos';
+  }
+ this.libtaos = ffi.Library(taoslibname, {
+ 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]],
+ 'taos_init': [ref.types.void, []],
+ //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port)
+ 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]],
+ //void taos_close(TAOS *taos)
+ 'taos_close': [ref.types.void, [ref.types.void_ptr]],
+ //int *taos_fetch_lengths(TAOS_RES *res);
+ 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]],
+ //int taos_query(TAOS *taos, char *sqlstr)
+ 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]],
+ //int taos_affected_rows(TAOS_RES *res)
+ 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]],
+ //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows)
+ 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]],
+ //int taos_num_fields(TAOS_RES *res);
+ 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]],
+ //TAOS_ROW taos_fetch_row(TAOS_RES *res)
+ //TAOS_ROW is void **, but we set the return type as a reference instead to get the row
+ 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]],
+ 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
+ //int taos_result_precision(TAOS_RES *res)
+ 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]],
+ //void taos_free_result(TAOS_RES *res)
+ 'taos_free_result': [ref.types.void, [ref.types.void_ptr]],
+ //int taos_field_count(TAOS *taos)
+ 'taos_field_count': [ref.types.int, [ref.types.void_ptr]],
+ //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)
+ 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]],
+ //int taos_errno(TAOS *taos)
+ 'taos_errno': [ref.types.int, [ref.types.void_ptr]],
+ //char *taos_errstr(TAOS *taos)
+ 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]],
+ //void taos_stop_query(TAOS_RES *res);
+ 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]],
+ //char *taos_get_server_info(TAOS *taos);
+ 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]],
+ //char *taos_get_client_info();
+ 'taos_get_client_info': [ref.types.char_ptr, []],
+
+ // ASYNC
+ // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
+ 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]],
+ // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
+ 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]],
+
+ // Subscription
+ //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)
+ 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]],
+ // TAOS_RES *taos_consume(TAOS_SUB *tsub)
+ 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]],
+ //void taos_unsubscribe(TAOS_SUB *tsub);
+ 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]],
+
+ // Continuous Query
+ //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+ // int64_t stime, void *param, void (*callback)(void *));
+ 'taos_open_stream': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]],
+ //void taos_close_stream(TAOS_STREAM *tstr);
+ 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]]
+
+ });
+ if (pass == false) {
+ if (config == null) {
+ this._config = ref.alloc(ref.types.char_ptr, ref.NULL);
+ }
+ else {
+ try {
+ this._config = ref.allocCString(config);
+ }
+ catch (err) {
+ throw "Attribute Error: config is expected as a str";
+ }
+ }
+ if (config != null) {
+ this.libtaos.taos_options(3, this._config);
+ }
+ this.libtaos.taos_init();
+ }
+ return this;
+}
+CTaosInterface.prototype.config = function config() {
+ return this._config;
+}
+CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) {
+ let _host, _user, _password, _db, _port;
+ try {
+ _host = host != null ? ref.allocCString(host) : ref.NULL;
+ }
+ catch (err) {
+ throw "Attribute Error: host is expected as a str";
+ }
+ try {
+ _user = ref.allocCString(user)
+ }
+ catch (err) {
+ throw "Attribute Error: user is expected as a str";
+ }
+ try {
+ _password = ref.allocCString(password);
+ }
+ catch (err) {
+ throw "Attribute Error: password is expected as a str";
+ }
+ try {
+ _db = db != null ? ref.allocCString(db) : ref.NULL;
+ }
+ catch (err) {
+ throw "Attribute Error: db is expected as a str";
+ }
+ try {
+ _port = ref.alloc(ref.types.int, port);
+ }
+ catch (err) {
+ throw TypeError("port is expected as an int")
+ }
+ let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port);
+ if (ref.isNull(connection)) {
+ throw new errors.TDError('Failed to connect to TDengine');
+ }
+ else {
+ console.log('Successfully connected to TDengine');
+ }
+ return connection;
+}
+CTaosInterface.prototype.close = function close(connection) {
+ this.libtaos.taos_close(connection);
+ console.log("Connection is closed");
+}
+CTaosInterface.prototype.query = function query(connection, sql) {
+ return this.libtaos.taos_query(connection, ref.allocCString(sql));
+}
+CTaosInterface.prototype.affectedRows = function affectedRows(result) {
+ return this.libtaos.taos_affected_rows(result);
+}
+CTaosInterface.prototype.useResult = function useResult(result) {
+
+ let fields = [];
+ let pfields = this.fetchFields(result);
+ if (ref.isNull(pfields) == false) {
+ pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0);
+ for (let i = 0; i < pfields.length; i += 68) {
+      //0 - 64 = name, 65 = type, 66 - 67 = bytes (matches the reads below)
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 65, i)),
+ type: pfields[i + 65],
+ bytes: pfields[i + 66]
+ })
+ }
+ }
+ return fields;
+}
+CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
+ let pblock = ref.NULL_POINTER;
+ let num_of_rows = this.libtaos.taos_fetch_block(result, pblock);
+ if (ref.isNull(pblock.deref()) == true) {
+    return { blocks: null, num_of_rows: 0 };
+ }
+
+ var fieldL = this.libtaos.taos_fetch_lengths(result);
+ let precision = this.libtaos.taos_result_precision(result);
+
+ var fieldlens = [];
+
+ if (ref.isNull(fieldL) == false) {
+ for (let i = 0; i < fields.length; i++) {
+ let plen = ref.reinterpret(fieldL, 4, i * 4);
+ let len = plen.readInt32LE(0);
+ fieldlens.push(len);
+ }
+ }
+
+ let blocks = new Array(fields.length);
+ blocks.fill(null);
+ num_of_rows = Math.abs(num_of_rows);
+ let offset = 0;
+ let ptr = pblock.deref();
+
+ for (let i = 0; i < fields.length; i++) {
+    let pdata = ref.reinterpret(ptr, 8, i * 8);
+ if (ref.isNull(pdata.readPointer())) {
+ blocks[i] = new Array();
+ } else {
+ pdata = ref.ref(pdata.readPointer());
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision);
+ }
+ }
+ return { blocks: blocks, num_of_rows }
+}
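+// Hedged usage sketch (names illustrative): fetchBlock returns column-major
+// data, i.e. blocks[col][row]; callers such as consume() transpose it to rows.
+//   let res = cti.query(conn, 'select * from db.tb');
+//   let fields = cti.useResult(res);
+//   let { blocks, num_of_rows } = cti.fetchBlock(res, fields);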
+CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) {
+ let row = this.libtaos.taos_fetch_row(result);
+ return row;
+}
+CTaosInterface.prototype.freeResult = function freeResult(result) {
+ this.libtaos.taos_free_result(result);
+ result = null;
+}
+/** Number of fields returned in this result handle; for use with async results. */
+CTaosInterface.prototype.numFields = function numFields(result) {
+ return this.libtaos.taos_num_fields(result);
+}
+// Fetch fields count by connection, the latest query
+CTaosInterface.prototype.fieldsCount = function fieldsCount(result) {
+ return this.libtaos.taos_field_count(result);
+}
+CTaosInterface.prototype.fetchFields = function fetchFields(result) {
+ return this.libtaos.taos_fetch_fields(result);
+}
+CTaosInterface.prototype.errno = function errno(result) {
+ return this.libtaos.taos_errno(result);
+}
+CTaosInterface.prototype.errStr = function errStr(result) {
+ return ref.readCString(this.libtaos.taos_errstr(result));
+}
+// Async
+CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) {
+ // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param)
+ callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback);
+ this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
+ return param;
+}
+/** Asynchronously fetches the next block of rows. Wraps the callback and transfers a 4th argument to the cursor: the row data as blocks in javascript form.
+ * Note: This isn't a recursive function; to fetch all data, either use the TDengine cursor object, the TaosQuery object, or implement a recursive
+ * function yourself using the libtaos.taos_fetch_rows_a function.
+ */
+CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, param = ref.ref(ref.NULL)) {
+ // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
+ var cti = this;
+ // wrap callback with a function so interface can access the numOfRows value, needed in order to properly process the binary data
+ let asyncCallbackWrapper = function (param2, result2, numOfRows2) {
+ // Data preparation to pass to cursor. Could be bottleneck in query execution callback times.
+ let row = cti.libtaos.taos_fetch_row(result2);
+ let fields = cti.fetchFields_a(result2);
+
+ let precision = cti.libtaos.taos_result_precision(result2);
+ let blocks = new Array(fields.length);
+ blocks.fill(null);
+ numOfRows2 = Math.abs(numOfRows2);
+ let offset = 0;
+    var fieldL = cti.libtaos.taos_fetch_lengths(result2);
+ var fieldlens = [];
+ if (ref.isNull(fieldL) == false) {
+
+ for (let i = 0; i < fields.length; i++) {
+ let plen = ref.reinterpret(fieldL, 8, i * 8);
+ let len = ref.get(plen, 0, ref.types.int32);
+ fieldlens.push(len);
+ }
+ }
+ if (numOfRows2 > 0) {
+      for (let i = 0; i < fields.length; i++) {
+        let pdata = ref.reinterpret(row, 8, i * 8);
+        if (ref.isNull(pdata.readPointer())) {
+          blocks[i] = new Array();
+        } else {
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ let prow = ref.reinterpret(row, 8, i * 8);
+ prow = prow.readPointer();
+ prow = ref.ref(prow);
+ blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision);
+ //offset += fields[i]['bytes'] * numOfRows2;
+ }
+ }
+ }
+ callback(param2, result2, numOfRows2, blocks);
+ }
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper);
+ this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param);
+ return param;
+}
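+// Hedged usage sketch (illustrative only): chain the async query and fetch;
+// the decoded blocks arrive as the extra 4th callback argument.
+//   cti.query_a(conn, sql, (param, res, code) => {
+//     cti.fetch_rows_a(res, (p, r, numOfRows, blocks) => { /* consume blocks */ });
+//   });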
+// Fetch field meta data by result handle
+CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) {
+ let pfields = this.fetchFields(result);
+ let pfieldscount = this.numFields(result);
+ let fields = [];
+ if (ref.isNull(pfields) == false) {
+ pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0);
+ for (let i = 0; i < pfields.length; i += 68) {
+ //0 - 64 = name //65 = type, 66 - 67 = bytes
+ fields.push({
+ name: ref.readCString(ref.reinterpret(pfields, 65, i)),
+ type: pfields[i + 65],
+ bytes: pfields[i + 66]
+ })
+ }
+ }
+ return fields;
+}
+// Stop a query by result handle
+CTaosInterface.prototype.stopQuery = function stopQuery(result) {
+ if (result != null) {
+ this.libtaos.taos_stop_query(result);
+ }
+ else {
+ throw new errors.ProgrammingError("No result handle passed to stop query");
+ }
+}
+CTaosInterface.prototype.getServerInfo = function getServerInfo(connection) {
+ return ref.readCString(this.libtaos.taos_get_server_info(connection));
+}
+CTaosInterface.prototype.getClientInfo = function getClientInfo() {
+ return ref.readCString(this.libtaos.taos_get_client_info());
+}
+
+// Subscription
+CTaosInterface.prototype.subscribe = function subscribe(connection, restart, topic, sql, interval) {
+ let topicOrig = topic;
+ let sqlOrig = sql;
+ try {
+ sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ }
+ catch (err) {
+ throw "Attribute Error: sql is expected as a str";
+ }
+ try {
+ topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL);
+ }
+ catch (err) {
+ throw TypeError("topic is expected as a str");
+ }
+
+ restart = ref.alloc(ref.types.int, restart);
+
+ let subscription = this.libtaos.taos_subscribe(connection, restart, topic, sql, null, null, interval);
+ if (ref.isNull(subscription)) {
+    throw new errors.TDError('Failed to subscribe to TDengine | Topic: ' + topicOrig + ', SQL: ' + sqlOrig);
+ }
+ else {
+ console.log('Successfully subscribed to TDengine - Topic: ' + topicOrig);
+ }
+ return subscription;
+}
+
+CTaosInterface.prototype.consume = function consume(subscription) {
+ let result = this.libtaos.taos_consume(subscription);
+ let fields = [];
+ let pfields = this.fetchFields(result);
+ if (ref.isNull(pfields) == false) {
+ pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0);
+ for (let i = 0; i < pfields.length; i += 68) {
+      //0 - 64 = name, 65 = type, 66 - 67 = bytes (same layout as useResult)
+      fields.push({
+        name: ref.readCString(ref.reinterpret(pfields, 65, i)),
+        type: pfields[i + 65],
+        bytes: pfields[i + 66]
+      })
+ }
+ }
+
+ let data = [];
+ while (true) {
+ let { blocks, num_of_rows } = this.fetchBlock(result, fields);
+ if (num_of_rows == 0) {
+ break;
+ }
+ for (let i = 0; i < num_of_rows; i++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let j = 0; j < fields.length; j++) {
+ rowBlock[j] = blocks[j][i];
+ }
+ data[data.length - 1] = (rowBlock);
+ }
+ }
+ return { data: data, fields: fields, result: result };
+}
+CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
+ //void taos_unsubscribe(TAOS_SUB *tsub);
+ this.libtaos.taos_unsubscribe(subscription);
+}
+
+// Continuous Query
+CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) {
+ try {
+ sql = ref.allocCString(sql);
+ }
+ catch (err) {
+ throw "Attribute Error: sql string is expected as a str";
+ }
+ var cti = this;
+ let asyncCallbackWrapper = function (param2, result2, row) {
+ let fields = cti.fetchFields_a(result2);
+ let precision = cti.libtaos.taos_result_precision(result2);
+ let blocks = new Array(fields.length);
+ blocks.fill(null);
+ let numOfRows2 = 1;
+ let offset = 0;
+ if (numOfRows2 > 0) {
+ for (let i = 0; i < fields.length; i++) {
+ if (!convertFunctions[fields[i]['type']]) {
+ throw new errors.DatabaseError("Invalid data type returned from database");
+ }
+ blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision);
+ offset += fields[i]['bytes'] * numOfRows2;
+ }
+ }
+ callback(param2, result2, blocks, fields);
+ }
+ asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper);
+  let asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback);
+ let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
+  if (ref.isNull(streamHandle)) {
+    throw new errors.TDError('Failed to open a stream with TDengine');
+  }
+  else {
+    console.log("Successfully opened stream");
+    return streamHandle;
+  }
+}
+CTaosInterface.prototype.closeStream = function closeStream(stream) {
+ this.libtaos.taos_close_stream(stream);
+ console.log("Closed stream");
+}
diff --git a/tests/connectorTest/nodejsTest/nodetaos/connection.js b/tests/connectorTest/nodejsTest/nodetaos/connection.js
new file mode 100644
index 0000000000000000000000000000000000000000..08186f87053ad0ed0982ec8941f0cf38c4ad0467
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/connection.js
@@ -0,0 +1,84 @@
+const TDengineCursor = require('./cursor')
+const CTaosInterface = require('./cinterface')
+module.exports = TDengineConnection;
+
+/**
+ * TDengine Connection Class
+ * @param {object} options - Options for configuring the connection with TDengine
+ * @return {TDengineConnection}
+ * @class TDengineConnection
+ * @constructor
+ * @example
+ * //Initialize a new connection
+ * var conn = new TDengineConnection({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+ *
+ */
+function TDengineConnection(options) {
+ this._conn = null;
+ this._host = null;
+ this._user = "root"; //The default user
+ this._password = "taosdata"; //The default password
+ this._database = null;
+ this._port = 0;
+ this._config = null;
+ this._chandle = null;
+ this._configConn(options)
+ return this;
+}
+/**
+ * Configure the connection to TDengine
+ * @private
+ * @memberof TDengineConnection
+ */
+TDengineConnection.prototype._configConn = function _configConn(options) {
+ if (options['host']) {
+ this._host = options['host'];
+ }
+ if (options['user']) {
+ this._user = options['user'];
+ }
+ if (options['password']) {
+ this._password = options['password'];
+ }
+ if (options['database']) {
+ this._database = options['database'];
+ }
+ if (options['port']) {
+ this._port = options['port'];
+ }
+ if (options['config']) {
+ this._config = options['config'];
+ }
+ this._chandle = new CTaosInterface(this._config);
+ this._conn = this._chandle.connect(this._host, this._user, this._password, this._database, this._port);
+}
+/** Close the connection to TDengine */
+TDengineConnection.prototype.close = function close() {
+ this._chandle.close(this._conn);
+}
+/**
+ * Initialize a new cursor to interact with TDengine with
+ * @return {TDengineCursor}
+ */
+TDengineConnection.prototype.cursor = function cursor() {
+ //Pass the connection object to the cursor
+ return new TDengineCursor(this);
+}
+TDengineConnection.prototype.commit = function commit() {
+ return this;
+}
+TDengineConnection.prototype.rollback = function rollback() {
+ return this;
+}
+/**
+ * Clear the results from connector
+ * @private
+ */
+/*
+ TDengineConnection.prototype._clearResultSet = function _clearResultSet() {
+ var result = this._chandle.useResult(this._conn).result;
+ if (result) {
+ this._chandle.freeResult(result)
+ }
+}
+*/
diff --git a/tests/connectorTest/nodejsTest/nodetaos/constants.js b/tests/connectorTest/nodejsTest/nodetaos/constants.js
new file mode 100644
index 0000000000000000000000000000000000000000..cd6a0c9fbaff51e7f0ecd3ab06907b7b1fb7dcb1
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/constants.js
@@ -0,0 +1,76 @@
+/**
+ * Contains the definitions/values assigned to various field types
+ * @module FieldTypes
+ */
+/**
+ * TDengine Field Types and their type codes
+ * @typedef {Object} FieldTypes
+ * @global
+ * @property {number} C_NULL - Null
+ * @property {number} C_BOOL - Boolean. Note, 0x02 is the C_BOOL_NULL value.
+ * @property {number} C_TINYINT - Tiny Int, values in the range [-2^7+1, 2^7-1]. Note, -2^7 has been used as the C_TINYINT_NULL value
+ * @property {number} C_SMALLINT - Small Int, values in the range [-2^15+1, 2^15-1]. Note, -2^15 has been used as the C_SMALLINT_NULL value
+ * @property {number} C_INT - Int, values in the range [-2^31+1, 2^31-1]. Note, -2^31 has been used as the C_INT_NULL value
+ * @property {number} C_BIGINT - Big Int, values in the range [-2^63+1, 2^63-1].
+ * @property {number} C_FLOAT - Float, values in the range [-3.4E38, 3.4E38], accurate up to 6-7 decimal places.
+ * @property {number} C_DOUBLE - Double, values in the range [-1.7E308, 1.7E308], accurate up to 15-16 decimal places.
+ * @property {number} C_BINARY - Binary, encoded in utf-8.
+ * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured as the number of milliseconds elapsed since
+ * 1970-01-01 00:00:00.000 UTC.
+ * @property {number} C_NCHAR - NChar field type, a wide string (decoded as UTF-8 by this connector).
+ *
+ *
+ *
+ * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result).
+ * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result).
+ */
+module.exports = {
+ C_NULL : 0,
+ C_BOOL : 1,
+ C_TINYINT : 2,
+ C_SMALLINT : 3,
+ C_INT : 4,
+ C_BIGINT : 5,
+ C_FLOAT : 6,
+ C_DOUBLE : 7,
+ C_BINARY : 8,
+ C_TIMESTAMP : 9,
+ C_NCHAR : 10,
+ // NULL value definition
+ // NOTE: These values should change according to C definition in tsdb.h
+ C_BOOL_NULL : 2,
+ C_TINYINT_NULL : -128,
+ C_SMALLINT_NULL : -32768,
+ C_INT_NULL : -2147483648,
+ C_BIGINT_NULL : -9223372036854775808,
+ C_FLOAT_NULL : 2146435072,
+ C_DOUBLE_NULL : -9223370937343148032,
+ C_NCHAR_NULL : 4294967295,
+ C_BINARY_NULL : 255,
+ C_TIMESTAMP_MILLI : 0,
+ C_TIMESTAMP_MICRO : 1,
+ getType,
+}
+
+const typeCodesToName = {
+ 0 : 'Null',
+ 1 : 'Boolean',
+ 2 : 'Tiny Int',
+ 3 : 'Small Int',
+ 4 : 'Int',
+ 5 : 'Big Int',
+ 6 : 'Float',
+ 7 : 'Double',
+ 8 : 'Binary',
+ 9 : 'Timestamp',
+ 10 : 'Nchar',
+}
+
+/**
+ * @function
+ * @param {number} typecode - The code to get the name of the type for
+ * @return {string} Name of the field type
+ */
+function getType(typecode) {
+ return typeCodesToName[typecode];
+}
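+// Example: getType(9) returns 'Timestamp'; getType(5) returns 'Big Int'.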
diff --git a/tests/connectorTest/nodejsTest/nodetaos/cursor.js b/tests/connectorTest/nodejsTest/nodetaos/cursor.js
new file mode 100644
index 0000000000000000000000000000000000000000..f879d89d487eae9290fd9fc70259699f27937928
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/cursor.js
@@ -0,0 +1,476 @@
+const ref = require('ref-napi');
+require('./globalfunc.js')
+const CTaosInterface = require('./cinterface')
+const errors = require('./error')
+const TaosQuery = require('./taosquery')
+const { PerformanceObserver, performance } = require('perf_hooks');
+module.exports = TDengineCursor;
+
+/**
+ * @typedef {Object} Buffer - A Node.js buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details
+ * @global
+ */
+
+/**
+ * @class TDengineCursor
+ * @classdesc The TDengine Cursor works directly with the C Interface which works with TDengine. It refrains from
+ * returning parsed data; the majority of its functions, such as cursor.fetchall(), return raw data, as compared to the TaosQuery class, which
+ * has functions that "prettify" the data, add more functionality, and can be used through cursor.query("your query"). Instead of
+ * promises, the class and its functions use callbacks.
+ * @param {TDengineConnection} connection - The TDengine connection this cursor uses to interact with TDengine
+ * @property {data} - Latest retrieved data from query execution. It is an empty array by default
+ * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved
+ * @since 1.0.0
+ */
+function TDengineCursor(connection = null) {
+  //All parameters are stored for sync queries only.
+ this._rowcount = -1;
+ this._connection = null;
+ this._result = null;
+ this._fields = null;
+ this.data = [];
+ this.fields = null;
+ if (connection != null) {
+ this._connection = connection
+ this._chandle = connection._chandle //pass through, just need library loaded.
+ }
+ else {
+ throw new errors.ProgrammingError("A TDengineConnection object is required to be passed to the TDengineCursor");
+ }
+
+}
+/**
+ * Get the row counts of the latest query
+ * @since 1.0.0
+ * @return {number} Rowcount
+ */
+TDengineCursor.prototype.rowcount = function rowcount() {
+ return this._rowcount;
+}
+/**
+ * Close the cursor by setting its connection to null and freeing results from the connection and resetting the results it has stored
+ * @return {boolean} Whether or not the cursor was successfully closed
+ * @since 1.0.0
+ */
+TDengineCursor.prototype.close = function close() {
+ if (this._connection == null) {
+ return false;
+ }
+  if (typeof this._connection._clearResultSet === 'function') {
+    this._connection._clearResultSet(); // _clearResultSet is commented out in connection.js
+  }
+ this._reset_result();
+ this._connection = null;
+ return true;
+}
+/**
+ * Create a TaosQuery object to perform a query to TDengine and retrieve data.
+ * @param {string} operation - The operation string to perform a query on
+ * @param {boolean} execute - Whether or not to immediately perform the query. Default is false.
+ * @return {TaosQuery | Promise} A TaosQuery object
+ * @example
+ * var query = cursor.query("select count(*) from meterinfo.meters");
+ * query.execute();
+ * @since 1.0.6
+ */
+TDengineCursor.prototype.query = function query(operation, execute = false) {
+ return new TaosQuery(operation, this, execute);
+}
+
+/**
+ * Execute a query. Also stores all the field meta data returned from the query into cursor.fields. It is preferable to use cursor.query() to create
+ * queries and execute them instead of using the cursor object directly.
+ * @param {string} operation - The query operation to execute in the taos shell
+ * @param {Object} options - Execution options object. quiet : true turns off logging from queries
+ * @param {boolean} options.quiet - True if you want to suppress logging such as "Query OK, 1 row(s) ..."
+ * @param {function} callback - A callback function to execute after the query is made to TDengine
+ * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
+ * @since 1.0.0
+ */
+TDengineCursor.prototype.execute = function execute(operation, options, callback) {
+  if (operation == undefined) {
+    throw new errors.ProgrammingError('No operation passed as argument');
+  }
+
+ if (typeof options == 'function') {
+ callback = options;
+ }
+ if (typeof options != 'object') options = {}
+ if (this._connection == null) {
+ throw new errors.ProgrammingError('Cursor is not connected');
+ }
+
+ this._reset_result();
+
+ let stmt = operation;
+ let time = 0;
+ let res;
+ if (options['quiet'] != true) {
+ const obs = new PerformanceObserver((items) => {
+ time = items.getEntries()[0].duration;
+ performance.clearMarks();
+ });
+ obs.observe({ entryTypes: ['measure'] });
+ performance.mark('A');
+ this._result = this._chandle.query(this._connection._conn, stmt);
+ performance.mark('B');
+ performance.measure('query', 'A', 'B');
+ }
+ else {
+ this._result = this._chandle.query(this._connection._conn, stmt);
+ }
+ res = this._chandle.errno(this._result);
+ if (res == 0) {
+ let fieldCount = this._chandle.fieldsCount(this._result);
+ if (fieldCount == 0) {
+ let affectedRowCount = this._chandle.affectedRows(this._result);
+ let response = this._createAffectedResponse(affectedRowCount, time)
+ if (options['quiet'] != true) {
+ console.log(response);
+ }
+ wrapCB(callback);
+ return affectedRowCount; //return num of affected rows, common with insert, use statements
+ }
+ else {
+ this._fields = this._chandle.useResult(this._result);
+ this.fields = this._fields;
+ wrapCB(callback);
+
+ return this._result; //return a pointer to the result
+ }
+ }
+ else {
+ throw new errors.ProgrammingError(this._chandle.errStr(this._result))
+ }
+
+}
+TDengineCursor.prototype._createAffectedResponse = function (num, time) {
+ return "Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)";
+}
+TDengineCursor.prototype._createSetResponse = function (num, time) {
+ return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)";
+}
+TDengineCursor.prototype.executemany = function executemany() {
+
+}
+TDengineCursor.prototype.fetchone = function fetchone() {
+
+}
+TDengineCursor.prototype.fetchmany = function fetchmany() {
+
+}
+/**
+ * Fetches all results from a query and also stores results into cursor.data. It is preferable to use cursor.query() to create
+ * queries and execute them instead of using the cursor object directly.
+ * @param {function} callback - callback function executing on the complete fetched data
+ * @return {Array} The resultant array, with entries corresponding to each retrieved row from the query results, sorted in
+ * order by the field name ordering in the table.
+ * @since 1.0.0
+ * @example
+ * cursor.execute('select * from db.table');
+ * var data = cursor.fetchall(function(results) {
+ * results.forEach(row => console.log(row));
+ * })
+ */
+TDengineCursor.prototype.fetchall = function fetchall(options, callback) {
+ if (this._result == null || this._fields == null) {
+ throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. First execute a query first");
+ }
+
+ let num_of_rows = this._chandle.affectedRows(this._result);
+ let data = new Array(num_of_rows);
+
+ this._rowcount = 0;
+
+ let time = 0;
+ const obs = new PerformanceObserver((items) => {
+ time += items.getEntries()[0].duration;
+ performance.clearMarks();
+ });
+ obs.observe({ entryTypes: ['measure'] });
+ performance.mark('A');
+ while (true) {
+ let blockAndRows = this._chandle.fetchBlock(this._result, this._fields);
+ // console.log(blockAndRows);
+ // break;
+ let block = blockAndRows.blocks;
+ let num_of_rows = blockAndRows.num_of_rows;
+ if (num_of_rows == 0) {
+ break;
+ }
+ this._rowcount += num_of_rows;
+ let numoffields = this._fields.length;
+ for (let i = 0; i < num_of_rows; i++) {
+ // data.push([]);
+
+ let rowBlock = new Array(numoffields);
+ for (let j = 0; j < numoffields; j++) {
+ rowBlock[j] = block[j][i];
+ }
+ data[this._rowcount - num_of_rows + i] = (rowBlock);
+ // data.push(rowBlock);
+ }
+
+ }
+
+ performance.mark('B');
+ performance.measure('query', 'A', 'B');
+ let response = this._createSetResponse(this._rowcount, time)
+ console.log(response);
+
+ // this._connection._clearResultSet();
+ let fields = this.fields;
+ this._reset_result();
+ this.data = data;
+ this.fields = fields;
+
+ wrapCB(callback, data);
+
+ return data;
+}
+/**
+ * Asynchronously execute a query to TDengine. NOTE, insertion requests must be done in sync if on the same table.
+ * @param {string} operation - The query operation to execute in the taos shell
+ * @param {Object} options - Execution options object. quiet : true turns off logging from queries
+ * @param {boolean} options.quiet - True if you want to suppress logging such as "Query OK, 1 row(s) ..."
+ * @param {function} callback - A callback function to execute after the query is made to TDengine
+ * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query
+ * @since 1.0.0
+ */
+TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) {
+  if (operation == undefined) {
+    throw new errors.ProgrammingError('No operation passed as argument');
+  }
+ if (typeof options == 'function') {
+ //we expect the parameter after callback to be param
+ param = callback;
+ callback = options;
+ }
+ if (typeof options != 'object') options = {}
+ if (this._connection == null) {
+ throw new errors.ProgrammingError('Cursor is not connected');
+ }
+ if (typeof callback != 'function') {
+ throw new errors.ProgrammingError("No callback function passed to execute_a function");
+ }
+ // Async wrapper for callback;
+ var cr = this;
+
+ let asyncCallbackWrapper = function (param2, res2, resCode) {
+ if (typeof callback == 'function') {
+ callback(param2, res2, resCode);
+ }
+
+ if (resCode >= 0) {
+ // let fieldCount = cr._chandle.numFields(res2);
+ // if (fieldCount == 0) {
+ // //cr._chandle.freeResult(res2);
+ // return res2;
+ // }
+ // else {
+ // return res2;
+ // }
+ return res2;
+
+ }
+ else {
+ throw new errors.ProgrammingError("Error occuring with use of execute_a async function. Status code was returned with failure");
+ }
+ }
+
+ let stmt = operation;
+ let time = 0;
+
+  // Use the ref module to write to a buffer here in cursor.js rather than in taosquery, to keep the layers separate; taosquery stays
+  // high level by simply passing an object as the param
+ var buf = ref.alloc('Object');
+ ref.writeObject(buf, 0, param);
+ const obs = new PerformanceObserver((items) => {
+ time = items.getEntries()[0].duration;
+ performance.clearMarks();
+ });
+ obs.observe({ entryTypes: ['measure'] });
+ performance.mark('A');
+ this._chandle.query_a(this._connection._conn, stmt, asyncCallbackWrapper, buf);
+ performance.mark('B');
+ performance.measure('query', 'A', 'B');
+ return param;
+
+
+}
+/**
+ * Fetches all results from an async query. It is preferable to use cursor.query_a() to create
+ * async queries and execute them instead of using the cursor object directly.
+ * @param {Object} options - An options object containing options for this function
+ * @param {function} callback - callback function that is called on the COMPLETE fetched data (it is called back only once!).
+ * Must be of form function (param, result, rowCount, rowData)
+ * @param {Object} param - A parameter that is also passed to the main callback function. Important! Param must be an object, and the key "data" cannot be used
+ * @return {{param:Object, result:Buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle.
+ * @since 1.2.0
+ * @example
+ * cursor.execute_a('select * from db.table;', function (param, result, code) {
+ *   cursor.fetchall_a(result, function (param, res, numOfRows, blocks) {
+ *     console.log(blocks.data);
+ *   }, param);
+ * });
+ */
+TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) {
+ if (typeof options == 'function') {
+ //we expect the parameter after callback to be param
+ param = callback;
+ callback = options;
+ }
+ if (typeof options != 'object') options = {}
+ if (this._connection == null) {
+ throw new errors.ProgrammingError('Cursor is not connected');
+ }
+ if (typeof callback != 'function') {
+ throw new errors.ProgrammingError('No callback function passed to fetchall_a function')
+ }
+ if (param.data) {
+ throw new errors.ProgrammingError("You aren't allowed to set the key 'data' for the parameters object");
+ }
+ let buf = ref.alloc('Object');
+ param.data = [];
+ var cr = this;
+
+ // This callback wrapper accumulates the data from the fetch_rows_a function from the cinterface. It is accumulated by passing the param2
+ // object which holds accumulated data in the data key.
+ let asyncCallbackWrapper = function asyncCallbackWrapper(param2, result2, numOfRows2, rowData) {
+ param2 = ref.readObject(param2); //return the object back from the pointer
+ if (numOfRows2 > 0 && rowData.length != 0) {
+      // Keep fetching until no rows are left.
+ let buf2 = ref.alloc('Object');
+ param2.data.push(rowData);
+ ref.writeObject(buf2, 0, param2);
+ cr._chandle.fetch_rows_a(result2, asyncCallbackWrapper, buf2);
+ }
+ else {
+ let finalData = param2.data;
+ let fields = cr._chandle.fetchFields_a(result2);
+ let data = [];
+ for (let i = 0; i < finalData.length; i++) {
+        let num_of_rows = finalData[i][0].length; // number of rows in fetched block i
+ let block = finalData[i];
+ for (let j = 0; j < num_of_rows; j++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let k = 0; k < fields.length; k++) {
+ rowBlock[k] = block[k][j];
+ }
+ data[data.length - 1] = rowBlock;
+ }
+ }
+ cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
+ callback(param2, result2, numOfRows2, { data: data, fields: fields });
+
+ }
+ }
+ ref.writeObject(buf, 0, param);
+ param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
+ return { param: param, result: result };
+}
+/**
+ * Stop a query given the result handle.
+ * @param {Buffer} result - The buffer that acts as the result handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.stopQuery = function stopQuery(result) {
+ this._chandle.stopQuery(result);
+}
+TDengineCursor.prototype._reset_result = function _reset_result() {
+ this._rowcount = -1;
+ if (this._result != null) {
+ this._chandle.freeResult(this._result);
+ }
+ this._result = null;
+ this._fields = null;
+ this.data = [];
+ this.fields = null;
+}
+/**
+ * Get server info such as version number
+ * @return {string}
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.getServerInfo = function getServerInfo() {
+ return this._chandle.getServerInfo(this._connection._conn);
+}
+/**
+ * Get client info such as version number
+ * @return {string}
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.getClientInfo = function getClientInfo() {
+ return this._chandle.getClientInfo();
+}
+/**
+ * Subscribe to a table from a database in TDengine.
+ * @param {Object} config - A configuration object containing the configuration options for the subscription
+ * @param {string} config.restart - whether to restart an existing subscription from the beginning, or continue where it left off
+ * @param {string} config.topic - The unique identifier of a subscription
+ * @param {string} config.sql - A sql statement for data query
+ * @param {string} config.interval - The pulling interval
+ * @return {Buffer} A buffer pointing to the subscription session handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.subscribe = function subscribe(config) {
+ let restart = config.restart ? 1 : 0;
+ return this._chandle.subscribe(this._connection._conn, restart, config.topic, config.sql, config.interval);
+};
+/**
+ * An infinite loop that consumes the latest data and calls a callback function that is provided.
+ * @param {Buffer} subscription - A buffer object pointing to the subscription session handle
+ * @param {function} callback - The callback function that takes the row data, field/column meta data, and the subscription session handle as input
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
+ while (true) {
+ let { data, fields, result } = this._chandle.consume(subscription);
+ callback(data, fields, result);
+ }
+}
+/**
+ * Unsubscribe the provided buffer object pointing to the subscription session handle
+ * @param {Buffer} subscription - A buffer object pointing to the subscription session handle that is to be unsubscribed
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
+ this._chandle.unsubscribe(subscription);
+}
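+// Illustrative usage sketch (database, table, and topic names are hypothetical):
+//   let sub = cursor.subscribe({ restart: true, topic: 'meters', sql: 'select * from db.meters;', interval: 1000 });
+//   cursor.consumeData(sub, (data, fields, result) => console.log(data, fields));
+//   // ...and once no more data is wanted:
+//   cursor.unsubscribe(sub);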
+/**
+ * Open a stream with TDengine to run the sql query periodically in the background
+ * @param {string} sql - The query to run
+ * @param {function} callback - The callback function to run after each query, accepting inputs as param, result handle, data, fields meta data
+ * @param {number} stime - The time at which the stream starts, in epoch milliseconds. If 0 is given, the start time is set to the current time.
+ * @param {function} stoppingCallback - The callback function to run when the continuous query stops. It takes no inputs
+ * @param {object} param - A parameter that is passed to the main callback function
+ * @return {Buffer} A buffer pointing to the stream handle
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
+ let buf = ref.alloc('Object');
+ ref.writeObject(buf, 0, param);
+
+ let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
+ let data = [];
+ let num_of_rows = blocks[0].length;
+ for (let j = 0; j < num_of_rows; j++) {
+ data.push([]);
+ let rowBlock = new Array(fields.length);
+ for (let k = 0; k < fields.length; k++) {
+ rowBlock[k] = blocks[k][j];
+ }
+ data[data.length - 1] = rowBlock;
+ }
+ callback(param2, result2, blocks, fields);
+ }
+ return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
+}
+/**
+ * Close a stream
+ * @param {Buffer} stream - A buffer pointing to the handle of the stream to be closed
+ * @since 1.3.0
+ */
+TDengineCursor.prototype.closeStream = function closeStream(stream) {
+ this._chandle.closeStream(stream);
+}
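+// Illustrative usage sketch (query and timing values are hypothetical):
+//   let stream = cursor.openStream('select count(*) from db.meters interval(10s);',
+//     (param, result, blocks, fields) => console.log(blocks),
+//     0, // stime = 0 starts the stream at the current time
+//     () => console.log('stream stopped'));
+//   // ...and later:
+//   cursor.closeStream(stream);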
diff --git a/tests/connectorTest/nodejsTest/nodetaos/error.js b/tests/connectorTest/nodejsTest/nodetaos/error.js
new file mode 100644
index 0000000000000000000000000000000000000000..8ab91a50c7d81a4675246617e0969ee8c81c514e
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/error.js
@@ -0,0 +1,96 @@
+
+/**
+ * TDengine Error Class
+ * @ignore
+ */
+class TDError extends Error {
+ constructor(args) {
+ super(args)
+ this.name = "TDError";
+ }
+}
+/** Exception raised for important warnings like data truncations while inserting.
+ * @ignore
+ */
+class Warning extends Error {
+ constructor(args) {
+ super(args)
+ this.name = "Warning";
+ }
+}
+/** Exception raised for errors that are related to the database interface rather than the database itself.
+ * @ignore
+ */
+class InterfaceError extends TDError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.InterfaceError";
+ }
+}
+/** Exception raised for errors that are related to the database.
+ * @ignore
+ */
+class DatabaseError extends TDError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError";
+ }
+}
+/** Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range.
+ * @ignore
+ */
+class DataError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.DataError";
+ }
+}
+/** Exception raised for errors that are related to the database's operation and are not necessarily under the control of the programmer.
+ * @ignore
+ */
+class OperationalError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.OperationalError";
+ }
+}
+/** Exception raised when the relational integrity of the database is affected.
+ * @ignore
+ */
+class IntegrityError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.IntegrityError";
+ }
+}
+/** Exception raised when the database encounters an internal error.
+ * @ignore
+ */
+class InternalError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.InternalError";
+ }
+}
+/** Exception raised for programming errors.
+ * @ignore
+ */
+class ProgrammingError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.ProgrammingError";
+ }
+}
+/** Exception raised in case a method or database API was used which is not supported by the database.
+ * @ignore
+ */
+class NotSupportedError extends DatabaseError {
+ constructor(args) {
+ super(args)
+ this.name = "TDError.DatabaseError.NotSupportedError";
+ }
+}
+
+module.exports = {
+ TDError, Warning, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError
+};
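+// Illustrative usage sketch: callers typically catch and match on these classes, e.g.
+//   try { cursor.execute(sql); }
+//   catch (err) { if (err instanceof ProgrammingError) { /* handle a bad query */ } }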
diff --git a/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js
new file mode 100644
index 0000000000000000000000000000000000000000..cf7344c868ee94831eba47ff55369a684e34b02f
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js
@@ -0,0 +1,14 @@
+/* Wrap a callback, reduce code amount */
+function wrapCB(callback, input) {
+ if (typeof callback === 'function') {
+ callback(input);
+ }
+ return;
+}
+global.wrapCB = wrapCB;
+function toTaosTSString(date) {
+ date = new Date(date);
+ let tsArr = date.toISOString().split("T")
+ return tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1);
+}
+global.toTaosTSString = toTaosTSString;
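+// For example, toTaosTSString(0) yields "1970-01-01 00:00:00.000":
+// the UTC ISO string with the "T" separator replaced by a space and the trailing "Z" dropped.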
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js
new file mode 100644
index 0000000000000000000000000000000000000000..3bc0fe0aca060a32daa7a5cebd2dbfb99ac29a7c
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js
@@ -0,0 +1,152 @@
+const FieldTypes = require('./constants');
+const util = require('util');
+/**
+ * Various objects such as TaosRow and TaosColumn that help make parsing data easier
+ * @module TaosObjects
+ *
+ */
+
+/**
+ * The TaosRow object. Contains the data from a retrieved row from a database and functions that parse the data.
+ * @typedef {Object} TaosRow - A row of data retrieved from a table.
+ * @global
+ * @example
+ * var trow = new TaosRow(row);
+ * console.log(trow.data);
+ */
+function TaosRow(row) {
+ this.data = row;
+ this.length = row.length;
+ return this;
+}
+
+/**
+ * @typedef {Object} TaosField - A field/column's metadata from a table.
+ * @global
+ * @example
+ * var tfield = new TaosField(field);
+ * console.log(tfield.name);
+ */
+
+function TaosField(field) {
+ this._field = field;
+ this.name = field.name;
+ this.type = FieldTypes.getType(field.type);
+ return this;
+}
+
+/**
+ * A TaosTimestamp object, which is the standard date object with added functionality
+ * @global
+ * @memberof TaosObjects
+ * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000
+ */
+class TaosTimestamp extends Date {
+ constructor(date, precision = 0) {
+ if (precision === 1) {
+ super(Math.floor(date / 1000));
+ this.precisionExtras = date % 1000;
+ } else if (precision === 2) {
+      // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000, which is not the expected 1623254400999
+ super(parseInt(BigInt(date) / 1000000n));
+      // use BigInt to fix: 1625801548423914405 % 1000000 = 914496, instead of the expected 914405
+ this.precisionExtras = parseInt(BigInt(date) % 1000000n);
+ } else {
+ super(parseInt(date));
+ }
+ this.precision = precision;
+ }
+
+ /**
+ * TDengine raw timestamp.
+ * @returns raw taos timestamp (int64)
+ */
+ taosTimestamp() {
+ if (this.precision == 1) {
+ return (this * 1000 + this.precisionExtras);
+ } else if (this.precision == 2) {
+ return (this * 1000000 + this.precisionExtras);
+ } else {
+ return Math.floor(this);
+ }
+ }
+
+ /**
+ * Gets the microseconds of a Date.
+ * @return {Int} A microseconds integer
+ */
+ getMicroseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000 + this.precisionExtras / 1000;
+ } else {
+ return 0;
+ }
+ }
+ /**
+ * Gets the nanoseconds of a TaosTimestamp.
+ * @return {Int} A nanoseconds integer
+ */
+ getNanoseconds() {
+ if (this.precision == 1) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000;
+ } else if (this.precision == 2) {
+ return this.getMilliseconds() * 1000000 + this.precisionExtras;
+ } else {
+ return 0;
+ }
+ }
+
+ /**
+   * @returns {String} the sub-millisecond part of the timestamp as a zero-padded string
+ */
+ _precisionExtra() {
+ if (this.precision == 1) {
+ return String(this.precisionExtras).padStart(3, '0');
+ } else if (this.precision == 2) {
+ return String(this.precisionExtras).padStart(6, '0');
+ } else {
+ return '';
+ }
+ }
+ /**
+   * @function Converts the date into a string usable by TDengine
+ * @return {string} A Taos Timestamp String
+ */
+ toTaosString() {
+ var tzo = -this.getTimezoneOffset(),
+ dif = tzo >= 0 ? '+' : '-',
+ pad = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ return (norm < 10 ? '0' : '') + norm;
+ },
+ pad2 = function (num) {
+ var norm = Math.floor(Math.abs(num));
+ if (norm < 10) return '00' + norm;
+ if (norm < 100) return '0' + norm;
+ if (norm < 1000) return norm;
+ };
+ return this.getFullYear() +
+ '-' + pad(this.getMonth() + 1) +
+ '-' + pad(this.getDate()) +
+ ' ' + pad(this.getHours()) +
+ ':' + pad(this.getMinutes()) +
+ ':' + pad(this.getSeconds()) +
+ '.' + pad2(this.getMilliseconds()) +
+ '' + this._precisionExtra();
+ }
+
+ /**
+ * Custom console.log
+ * @returns {String} string format for debug
+ */
+ [util.inspect.custom](depth, opts) {
+ return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts);
+ }
+ toString() {
+ return this.toTaosString();
+ }
+}
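+// Illustrative sketch: a nanosecond-precision timestamp (precision = 2). Passing the
+// raw int64 as a string lets BigInt() preserve its full precision:
+//   const ts = new TaosTimestamp('1625801548423914405', 2);
+//   ts.getNanoseconds() % 1000000; // => 914405, the sub-millisecond part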
+
+module.exports = { TaosRow, TaosField, TaosTimestamp }
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosquery.js b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js
new file mode 100644
index 0000000000000000000000000000000000000000..eeede3ff6885e27c1d1c569a7a410f88109c9acd
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js
@@ -0,0 +1,112 @@
+var TaosResult = require('./taosresult')
+require('./globalfunc.js')
+module.exports = TaosQuery;
+
+
+/**
+ * @class TaosQuery
+ * @classdesc The TaosQuery class sits one level above the TDengine Cursor: it generally returns promises from its functions and wraps
+ * data with objects such as TaosRow. This enables a higher-level API with additional functionality that saves time and, through the use
+ * of promises, makes code easier to debug and less error-prone.
+ * @param {string} query - Query to construct object from
+ * @param {TDengineCursor} cursor - The cursor from which this query will execute from
+ * @param {boolean} execute - Whether or not to immediately execute the query synchronously and fetch all results. Default is false.
+ * @property {string} query - The current query in string format the TaosQuery object represents
+ * @return {TaosQuery}
+ * @since 1.0.6
+ */
+function TaosQuery(query = "", cursor = null, execute = false) {
+ this.query = query;
+ this._cursor = cursor;
+ if (execute == true) {
+ return this.execute();
+ }
+ return this;
+}
+
+/**
+ * Executes the query object and returns a Promise
+ * @memberof TaosQuery
+ * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error
+ * @since 1.0.6
+ */
+TaosQuery.prototype.execute = async function execute() {
+  var taosQuery = this; // store the current instance so it is accessible inside the promise callback
+ var executionPromise = new Promise(function(resolve, reject) {
+ let data = [];
+ let fields = [];
+ let result;
+ try {
+ taosQuery._cursor.execute(taosQuery.query);
+ if (taosQuery._cursor._fields) fields = taosQuery._cursor._fields;
+ if (taosQuery._cursor._result != null) data = taosQuery._cursor.fetchall();
+ result = new TaosResult(data, fields)
+ }
+ catch(err) {
+ reject(err);
+ }
+ resolve(result)
+
+ });
+ return executionPromise;
+}
+
+/**
+ * Executes the query object asynchronously and returns a Promise. Runs the query to completion.
+ * @memberof TaosQuery
+ * @param {Object} options - Execution options
+ * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error
+ * @since 1.2.0
+ */
+TaosQuery.prototype.execute_a = async function execute_a(options = {}) {
+ var executionPromise = new Promise( (resolve, reject) => {
+
+ });
+ var fres;
+ var frej;
+ var fetchPromise = new Promise( (resolve, reject) => {
+ fres = resolve;
+ frej = reject;
+ });
+ let asyncCallbackFetchall = async function(param, res, numOfRows, blocks) {
+ if (numOfRows > 0) {
+ // Likely a query like insert
+ fres();
+ }
+ else {
+ fres(new TaosResult(blocks.data, blocks.fields));
+ }
+ }
+ let asyncCallback = async function(param, res, code) {
+ //upon success, we fetchall results
+ this._cursor.fetchall_a(res, options, asyncCallbackFetchall, {});
+ }
+ this._cursor.execute_a(this.query, asyncCallback.bind(this), {});
+ return fetchPromise;
+}
+
+/**
+ * Bind arguments to the query and automatically parses them into the right format
+ * @param {array | ...args} args - A number of arguments to bind to each ? in the query
+ * @return {TaosQuery}
+ * @example
+ * // An example of binding a javascript date and a number to a query
+ * var query = cursor.query("select count(*) from meterinfo.meters where ts <= ? and areaid = ?").bind(new Date(), 3);
+ * var promise1 = query.execute();
+ * promise1.then(function(result) {
+ * result.pretty(); // Log the prettified version of the results.
+ * });
+ * @since 1.0.6
+ */
+TaosQuery.prototype.bind = function bind(f, ...args) {
+ if (typeof f == 'object' && f.constructor.name != 'Array') args.unshift(f); //param is not an array object
+ else if (typeof f != 'object') args.unshift(f);
+ else { args = f; }
+ args.forEach(function(arg) {
+ if (arg.constructor.name == 'TaosTimestamp') arg = "\"" + arg.toTaosString() + "\"";
+ else if (arg.constructor.name == 'Date') arg = "\"" + toTaosTSString(arg) + "\"";
+ else if (typeof arg == 'string') arg = "\"" + arg + "\"";
+ this.query = this.query.replace(/\?/,arg);
+ }, this);
+ return this;
+}
diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosresult.js b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js
new file mode 100644
index 0000000000000000000000000000000000000000..4138ebbec6e1b792691d17a25b7c18d35b6a922a
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js
@@ -0,0 +1,85 @@
+require('./globalfunc.js')
+const TaosObjects = require('./taosobjects');
+const TaosRow = TaosObjects.TaosRow;
+const TaosField = TaosObjects.TaosField;
+
+module.exports = TaosResult;
+/**
+ * @class TaosResult
+ * @classdesc A TaosResult class consists of the row data and the fields metadata, all wrapped under various objects for higher functionality.
+ * @param {Array} data - Array of result rows
+ * @param {Array} fields - Array of field meta data
+ * @property {Array} data - Array of TaosRows forming the result data (this does not include field meta data)
+ * @property {Array} fields - Array of TaosFields forming the fields meta data array.
+ * @return {TaosResult}
+ * @since 1.0.6
+ */
+function TaosResult(data, fields) {
+ this.data = data.map(row => new TaosRow(row));
+ this.rowcount = this.data.length;
+ this.fields = fields.map(field => new TaosField(field));
+}
+/**
+ * Pretty print data and the fields meta data as if you were using the taos shell
+ * @memberof TaosResult
+ * @function pretty
+ * @since 1.0.6
+ */
+
+TaosResult.prototype.pretty = function pretty() {
+ let fieldsStr = "";
+ let sizing = [];
+ this.fields.forEach((field,i) => {
+ if (field._field.type == 8 || field._field.type == 10){
+ sizing.push(Math.max(field.name.length, field._field.bytes));
+ }
+ else {
+ sizing.push(Math.max(field.name.length, suggestedMinWidths[field._field.type]));
+ }
+ fieldsStr += fillEmpty(Math.floor(sizing[i]/2 - field.name.length / 2)) + field.name + fillEmpty(Math.ceil(sizing[i]/2 - field.name.length / 2)) + " | ";
+ });
+ var sumLengths = sizing.reduce((a,b)=> a+=b,(0)) + sizing.length * 3;
+
+ console.log("\n" + fieldsStr);
+ console.log(printN("=",sumLengths));
+ this.data.forEach(row => {
+ let rowStr = "";
+ row.data.forEach((entry, i) => {
+ if (this.fields[i]._field.type == 9) {
+ entry = entry.toTaosString();
+ } else {
+ entry = entry == null ? 'null' : entry.toString();
+ }
+ rowStr += entry
+ rowStr += fillEmpty(sizing[i] - entry.length) + " | ";
+ });
+ console.log(rowStr);
+ });
+}
+const suggestedMinWidths = {
+ 0: 4,
+ 1: 4,
+ 2: 4,
+ 3: 6,
+ 4: 11,
+ 5: 12,
+ 6: 24,
+ 7: 24,
+ 8: 10,
+ 9: 25,
+ 10: 10,
+}
+function printN(s, n) {
+ let f = "";
+ for (let i = 0; i < n; i ++) {
+ f += s;
+ }
+ return f;
+}
+function fillEmpty(n) {
+ let str = "";
+ for (let i = 0; i < n; i++) {
+ str += " ";
+ }
+ return str;
+}
diff --git a/tests/connectorTest/nodejsTest/readme.md b/tests/connectorTest/nodejsTest/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..26a28afbdd514ad97e969302e7d790f6240bb770
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/readme.md
@@ -0,0 +1,161 @@
+# TDengine Node.js connector
+
+This is the Node.js library that lets you connect to [TDengine](https://www.github.com/taosdata/tdengine) version 2.0. It is built so that you can use as much or as little of it as you want, through an extensive API. If you want the raw data in the form of an array of arrays for the rows retrieved from a table, you can have that. If you want to wrap that data with objects that let you easily manipulate and display it, such as with a prettifier function, you can do that too!
+
+## Installation
+
+To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/)
+
+```cmd
+npm install td2.0-connector
+```
+
+To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. To install it, you will need the following, depending on your platform (the instructions below are quoted from node-gyp):
+
+### On Linux
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
+- `make`
+- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
+- `node` (between `v10.x` and `v11.x`; other versions have dependency compatibility problems)
+
+### On macOS
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
+- Xcode
+  - You also need to install the `Command Line Tools` via Xcode. You can find this under the menu `Xcode -> Preferences -> Locations` (or by running `xcode-select --install` in your Terminal)
+  - This step will install `gcc` and the related toolchain containing `make`
+
+### On Windows
+
+#### Option 1
+
+Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
+
+#### Option 2
+
+Install tools and configuration manually:
+
+- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
+- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
+- Launch cmd, `npm config set msvs_version 2017`
+
+If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
+
+To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
+
+## Usage
+
+The following is a short summary of the connector's basic usage; the full API and documentation can be found [here](http://docs.taosdata.com/node).
+
+### Connection
+
+To use the connector, first require the library ```td2.0-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The only required connection option is ```host```; other options, if not set, default to the values shown below.
+
+A cursor also needs to be initialized in order to interact with TDengine from Node.js.
+
+```javascript
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+var cursor = conn.cursor(); // Initializing a new cursor
+```
+
+Close a connection
+
+```javascript
+conn.close();
+```
+
+### Queries
+
+We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object.
+
+```javascript
+var query = cursor.query('show databases;')
+```
+
+We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object. The TaosResult object contains the raw data and additional functionality such as pretty printing the results.
+
+```javascript
+var promise = query.execute();
+promise.then(function(result) {
+ result.pretty(); //logs the results to the console as if you were in the taos shell
+});
+```
+
+You can also bind parameters to a query by filling in the question marks in the query string, as shown below. The query will automatically parse the bound values and convert them to the proper format for use with TDengine.
+```javascript
+var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
+query.execute().then(function(result) {
+ result.pretty();
+})
+```
+
+The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
+```javascript
+var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
+promise.then(function(result) {
+ result.pretty();
+})
+```
+
+If you want to execute queries without objects being wrapped around the data, use ```cursor.execute()``` directly and ```cursor.fetchall()``` to retrieve data if there is any.
+```javascript
+cursor.execute('select count(*), avg(v1), min(v2) from meterinfo.meters where ts >= \"2019-07-20 00:00:00.000\";');
+var data = cursor.fetchall();
+console.log(cursor.fields); // Latest query's Field metadata is stored in cursor.fields
+console.log(cursor.data); // Latest query's result data is stored in cursor.data, also returned by fetchall.
+```
+
+### Async functionality
+
+Async queries can be performed using the same functions, such as `cursor.execute` and `TaosQuery.query`, but with `_a` appended to them.
+
+Say you want to execute async queries on two separate tables. Using `cursor.query` you can create a TaosQuery object for each; executing one with the `execute_a` function returns a promise that resolves with a TaosResult object.
+
+```javascript
+var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
+var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
+promise1.then(function(result) {
+ result.pretty();
+})
+promise2.then(function(result) {
+ result.pretty();
+})
+```
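+
+If you prefer callbacks over promises, `cursor.execute_a` and `cursor.fetchall_a` can also be used directly. A minimal sketch (the table name is illustrative):
+
+```javascript
+// execute_a runs the query asynchronously; its callback receives (param, result, code)
+cursor.execute_a('describe db.table;', function (param, result, code) {
+  // fetchall_a accumulates all rows, then calls back exactly once with {data, fields}
+  cursor.fetchall_a(result, function (param, res, numOfRows, blocks) {
+    console.log(blocks.fields);
+    console.log(blocks.data);
+  }, param);
+}, { myparam: 3.141 });
+```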
+
+## Example
+
+An example of using the Node.js connector to create a table with weather data and to create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (this is the preferred way of using the connector).
+
+An example of using the Node.js connector to achieve the same things, but without the object wrappers around the returned data, can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js).
+
+## Contributing to TDengine
+
+Please follow the [contribution guidelines](https://github.com/taosdata/TDengine/blob/master/CONTRIBUTING.md) to contribute to the project.
+
+## License
+
+[GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html)
diff --git a/tests/connectorTest/nodejsTest/tdengine.js b/tests/connectorTest/nodejsTest/tdengine.js
new file mode 100644
index 0000000000000000000000000000000000000000..047c744a4fc90c6306e851eaa529a7f9f578fe12
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/tdengine.js
@@ -0,0 +1,4 @@
+var TDengineConnection = require('./nodetaos/connection.js')
+module.exports.connect = function (connection={}) {
+ return new TDengineConnection(connection);
+}
diff --git a/tests/connectorTest/nodejsTest/test/performance.js b/tests/connectorTest/nodejsTest/test/performance.js
new file mode 100644
index 0000000000000000000000000000000000000000..ea197f034435e28edd67df8d5f4b141f410fed81
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/performance.js
@@ -0,0 +1,89 @@
+function memoryUsageData() {
+ let s = process.memoryUsage()
+  for (let key in s) {
+ s[key] = (s[key]/1000000).toFixed(3) + "MB";
+ }
+ return s;
+}
+console.log("initial mem usage:", memoryUsageData());
+
+const { PerformanceObserver, performance } = require('perf_hooks');
+const taos = require('../tdengine');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0});
+var c1 = conn.cursor();
+
+// Initialize env
+c1.execute('create database if not exists td_connector_test;');
+c1.execute('use td_connector_test;')
+c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));');
+c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));')
+
+
+// Insertion into single table Performance Test
+var dataPrepTime = 0;
+var insertTime = 0;
+var insertTime5000 = 0;
+var avgInsert5ktime = 0;
+const obs = new PerformanceObserver((items) => {
+ let entry = items.getEntries()[0];
+
+ if (entry.name == 'Data Prep') {
+ dataPrepTime += entry.duration;
+ }
+ else if (entry.name == 'Insert'){
+ insertTime += entry.duration
+ }
+ else {
+ console.log(entry.name + ': ' + (entry.duration/1000).toFixed(8) + 's');
+ }
+ performance.clearMarks();
+});
+obs.observe({ entryTypes: ['measure'] });
+
+function R(l,r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+function insertN(n) {
+ for (let i = 0; i < n; i++) {
+ performance.mark('A3');
+ let insertData = ["now + " + i + "m", // Timestamp
+ parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int
+ parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt
+ parseFloat( R(-3.4E38, 3.4E38) ), // Float
+ parseFloat( R(-1.7E308, 1.7E308) ), // Double
+ "\"Long Binary\"", // Binary
+ parseInt( R(-32767, 32767) ), // Small Int
+ parseInt( R(-127, 127) ), // Tiny Int
+      randomBool(),                                          // Bool
+      "\"Nchars 一些中文字幕\""];                            // Nchar
+ let query = 'insert into td_connector_test.all_types values(' + insertData.join(',') + ' );';
+ performance.mark('B3');
+ performance.measure('Data Prep', 'A3', 'B3');
+ performance.mark('A2');
+ c1.execute(query, {quiet:true});
+ performance.mark('B2');
+ performance.measure('Insert', 'A2', 'B2');
+ if ( i % 5000 == 4999) {
+ console.log("Insert # " + (i+1));
+ console.log('Insert 5k records: ' + ((insertTime - insertTime5000)/1000).toFixed(8) + 's');
+ insertTime5000 = insertTime;
+ avgInsert5ktime = (avgInsert5ktime/1000 * Math.floor(i / 5000) + insertTime5000/1000) / Math.ceil( i / 5000);
+ console.log('DataPrepTime So Far: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time So Far: ' + (insertTime/1000).toFixed(8) + 's | Avg. Insert 5k time: ' + avgInsert5ktime.toFixed(8));
+
+
+ }
+ }
+}
+performance.mark('insert 1E5')
+insertN(1E5);
+performance.mark('insert 1E5 2')
+performance.measure('Insert With Logs', 'insert 1E5', 'insert 1E5 2');
+console.log('DataPrepTime: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time: ' + (insertTime/1000).toFixed(8) + 's');
+dataPrepTime = 0; insertTime = 0;
+//'insert into td_connector_test.all_types values (now, null,null,null,null,null,null,null,null,null);'
diff --git a/tests/connectorTest/nodejsTest/test/test.js b/tests/connectorTest/nodejsTest/test/test.js
new file mode 100644
index 0000000000000000000000000000000000000000..caf05955da4c960ebedc872f400c17d18be767dd
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/test.js
@@ -0,0 +1,170 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1) + "\"";
+}
+function R(l,r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+c1.execute('create database if not exists td_connector_test;');
+c1.execute('use td_connector_test;')
+c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));');
+c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));')
+
+// Shell Test : The following uses the cursor to imitate the taos shell
+
+// Insert
+for (let i = 0; i < 10000; i++) {
+ let insertData = ["now+" + i + "s", // Timestamp
+ parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int
+ parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt
+ parseFloat( R(-3.4E38, 3.4E38) ), // Float
+ parseFloat( R(-1.7E30, 1.7E30) ), // Double
+ "\"Long Binary\"", // Binary
+ parseInt( R(-32767, 32767) ), // Small Int
+ parseInt( R(-127, 127) ), // Tiny Int
+      randomBool(),     // Bool
+      "\"Nchars\""];    // Nchar
+ c1.execute('insert into td_connector_test.all_types values(' + insertData.join(',') + ' );', {quiet:true});
+ if (i % 1000 == 0) {
+ console.log("Insert # " , i);
+ }
+}
+
+// Select
+console.log('select * from td_connector_test.all_types limit 2 offset 100;');
+c1.execute('select * from td_connector_test.all_types limit 2 offset 100;');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+console.log(d);
+
+// Functions
+console.log('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;')
+c1.execute('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;');
+var d = c1.fetchall();
+console.log(c1.fields);
+console.log(d);
+
+// Immediate Execution like the Shell
+
+c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0;', true).then(function(result){
+ result.pretty();
+})
+
+c1.query('select _tinyint, _bool from all_types where _tinyint > 50 and _int < 0 limit 50;', true).then(function(result){
+ result.pretty();
+})
+
+c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types;', true).then(function(result){
+ result.pretty();
+})
+c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types interval(1m) limit 100;', true).then(function(result){
+ result.pretty();
+})
+
+// Binding arguments, and then using promise
+var q = c1.query('select _nchar from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100)
+console.log(q.query);
+q.execute().then(function(r) {
+ r.pretty();
+});
+
+
+// test query null value
+c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))");
+c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)");
+c1.execute("insert into t1(ts, temperature) values(now, 22.22)");
+c1.execute("insert into t1(ts, humidity) values(now, 33)");
+c1.query('select * from td_connector_test.t1', true).then(function (result) {
+ result.pretty();
+});
+
+var q = c1.query('select * from td_connector_test.weather');
+console.log(q.query);
+q.execute().then(function(r) {
+ r.pretty();
+});
+
+function sleep(sleepTime) {
+ for(var start = +new Date; +new Date - start <= sleepTime; ) { }
+}
+
+sleep(10000);
+
+// Raw Async Testing (Callbacks, not promises)
+function cb2(param, result, rowCount, rd) {
+ console.log('CB2 Callbacked!');
+ console.log("RES *", result);
+ console.log("Async fetched", rowCount, " rows");
+ console.log("Passed Param: ", param);
+ console.log("Fields ", rd.fields);
+ console.log("Data ", rd.data);
+}
+function cb1(param,result,code) {
+ console.log('CB1 Callbacked!');
+ console.log("RES * ", result);
+ console.log("Status: ", code);
+ console.log("Passed Param ", param);
+ c1.fetchall_a(result, cb2, param);
+}
+
+c1.execute_a("describe td_connector_test.all_types;", cb1, {myparam:3.141});
+
+function cb4(param, result, rowCount, rd) {
+ console.log('CB4 Callbacked!');
+ console.log("RES *", result);
+ console.log("Async fetched", rowCount, "rows");
+ console.log("Passed Param: ", param);
+ console.log("Fields", rd.fields);
+ console.log("Data", rd.data);
+}
+// Without directly calling fetchall_a
+var thisRes;
+function cb3(param,result,code) {
+ console.log('CB3 Callbacked!');
+ console.log("RES *", result);
+ console.log("Status:", code);
+ console.log("Passed Param", param);
+ thisRes = result;
+}
+// Test calling execute and fetchall separately, not through callbacks
+var param = c1.execute_a("describe td_connector_test.all_types;", cb3, {e:2.718});
+console.log("Passed Param outside of callback: ", param);
+console.log(param);
+setTimeout(function(){
+ c1.fetchall_a(thisRes, cb4, param);
+},100);
+
+
+// Async through promises
+var aq = c1.query('select count(*) from td_connector_test.all_types;',false);
+aq.execute_a().then(function(data) {
+ data.pretty();
+});
+
+c1.query('describe td_connector_test.stabletest').execute_a().then(function(r){
+ r.pretty()
+});
+
+setTimeout(function(){
+  c1.execute('drop database td_connector_test;');
+},200);
+
+setTimeout(function(){
+ conn.close();
+},2000);
diff --git a/tests/connectorTest/nodejsTest/test/testMicroseconds.js b/tests/connectorTest/nodejsTest/test/testMicroseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testMicroseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_us';
+c1.execute('create database if not exists ' + dbname + ' precision "us"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914) {
+ throw "microseconds not match!";
+}
+if (ts.getMicroseconds() % 1000 !== 914) {
+ throw "micronsecond precision error";
+}
+setTimeout(function () {
+  c1.execute('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/tests/connectorTest/nodejsTest/test/testNanoseconds.js b/tests/connectorTest/nodejsTest/test/testNanoseconds.js
new file mode 100644
index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testNanoseconds.js
@@ -0,0 +1,49 @@
+const taos = require('../tdengine');
+var conn = taos.connect();
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+function R(l, r) {
+ return Math.random() * (r - l) - r;
+}
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+//c1.execute('drop database td_connector_test;');
+const dbname = 'nodejs_test_ns';
+c1.execute('create database if not exists ' + dbname + ' precision "ns"');
+c1.execute('use ' + dbname)
+c1.execute('create table if not exists tstest (ts timestamp, _int int);');
+c1.execute('insert into tstest values(1625801548423914405, 0)');
+// Select
+console.log('select * from tstest');
+c1.execute('select * from tstest');
+
+var d = c1.fetchall();
+console.log(c1.fields);
+let ts = d[0][0];
+console.log(ts);
+
+if (ts.taosTimestamp() != 1625801548423914405) {
+ throw "nanosecond not match!";
+}
+if (ts.getNanoseconds() % 1000000 !== 914405) {
+ throw "nanosecond precision error";
+}
+setTimeout(function () {
+  c1.execute('drop database ' + dbname + ';');
+}, 200);
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
diff --git a/tests/connectorTest/nodejsTest/test/testSubscribe.js b/tests/connectorTest/nodejsTest/test/testSubscribe.js
new file mode 100644
index 0000000000000000000000000000000000000000..30fb3f425683f0113873534f2b67255db811edcc
--- /dev/null
+++ b/tests/connectorTest/nodejsTest/test/testSubscribe.js
@@ -0,0 +1,16 @@
+const taos = require('../tdengine');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
+var c1 = conn.cursor();
+let stime = new Date();
+let interval = 1000;
+c1.execute('use td_connector_test');
+let sub = c1.subscribe({
+ restart: true,
+ sql: "select AVG(_int) from td_connector_test.all_Types;",
+ topic: 'all_Types',
+ interval: 1000
+});
+
+c1.consumeData(sub, (data, fields) => {
+ console.log(data);
+});
\ No newline at end of file
diff --git a/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6a4bc73aef3e19bc56e817325acd62d21156d67
--- /dev/null
+++ b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py
@@ -0,0 +1,111 @@
+import pyodbc
+import argparse
+import sys
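+
+# Example invocation (DSN name and credentials are illustrative):
+#   python3 nanoTest_odbc.py --DSN TAOS_DSN --UID root --PWD taosdata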
+
+parser = argparse.ArgumentParser(description='Access TDengine via ODBC.')
+parser.add_argument('--DSN', help='DSN to use')
+parser.add_argument('--UID', help='UID to use')
+parser.add_argument('--PWD', help='PWD to use')
+parser.add_argument('--Server', help='Server to use')
+parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use')
+
+args = parser.parse_args()
+
+a = 'DSN=%s'%args.DSN if args.DSN else None
+b = 'UID=%s'%args.UID if args.UID else None
+c = 'PWD=%s'%args.PWD if args.PWD else None
+d = 'Server=%s'%args.Server if args.Server else None
+conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None
+conn_str = conn_str if conn_str else args.C
+if not conn_str:
+ parser.print_help(file=sys.stderr)
+ exit()
+
+print('connecting: [%s]' % conn_str)
+cnxn = pyodbc.connect(conn_str, autocommit=True)
+cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8')
+
+cursor = cnxn.cursor()
+cursor.execute("drop database if exists db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create database db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129")
+##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("""
+INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?)
+""",
+"2020-12-12 00:00:00",
+'true',
+'-127',
+'-32767',
+'-2147483647',
+'-9223372036854775807',
+'-1.23e10',
+'-11.23e6',
+'abcdefghij'.encode('utf-8'),
+"人啊大发测试及abc")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("drop database if exists db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create database db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)")
+cursor.close()
+
+params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'),
+ ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'),
+ ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'),
+ ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ]
+cursor = cnxn.cursor()
+cursor.fast_executemany = True
+print('py:...................')
+cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params)
+print('py:...................')
+cursor.close()
+
+## cursor = cnxn.cursor()
+## cursor.execute("SELECT * from db.v where v1 > ?", 4)
+## row = cursor.fetchone()
+## while row:
+## print(row)
+## row = cursor.fetchone()
+## cursor.close()
+##
+## cursor = cnxn.cursor()
+## cursor.execute("SELECT * from db.v where v1 > ?", '5')
+## row = cursor.fetchone()
+## while row:
+## print(row)
+## row = cursor.fetchone()
+## cursor.close()
+
diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.go b/tests/connectorTest/odbcTest/nanosupport/odbc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d9c760c4e87a4a899051edc74692ecca8a19d15
--- /dev/null
+++ b/tests/connectorTest/odbcTest/nanosupport/odbc.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "context"
+ "database/sql"
+ "flag"
+ "log"
+ "os"
+ "os/signal"
+ "time"
+ _ "github.com/alexbrainman/odbc"
+)
+
+var pool *sql.DB // Database connection pool.
+
+func main() {
+ id := flag.Int64("id", 32768, "person ID to find")
+ dsn := flag.String("dsn", os.Getenv("DSN"), "connection data source name")
+ flag.Parse()
+
+ if len(*dsn) == 0 {
+ log.Fatal("missing dsn flag")
+ }
+ if *id == 0 {
+ log.Fatal("missing person ID")
+ }
+ var err error
+
+ // Opening a driver typically will not attempt to connect to the database.
+ pool, err = sql.Open("odbc", *dsn)
+ if err != nil {
+ // This will not be a connection error, but a DSN parse error or
+ // another initialization error.
+ log.Fatal("unable to use data source name", err)
+ }
+ defer pool.Close()
+
+ pool.SetConnMaxLifetime(0)
+ pool.SetMaxIdleConns(3)
+ pool.SetMaxOpenConns(3)
+
+ ctx, stop := context.WithCancel(context.Background())
+ defer stop()
+
+ appSignal := make(chan os.Signal, 3)
+ signal.Notify(appSignal, os.Interrupt)
+
+	go func() {
+		// Cancel the context as soon as an interrupt signal arrives.
+		<-appSignal
+		stop()
+	}()
+
+ Ping(ctx)
+
+ Query(ctx, *id)
+}
+
+// Ping the database to verify that the DSN provided by the user is valid and the
+// server is accessible. If the ping fails, exit the program with an error.
+func Ping(ctx context.Context) {
+ ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
+ defer cancel()
+
+ if err := pool.PingContext(ctx); err != nil {
+ log.Fatalf("unable to connect to database: %v", err)
+ }
+}
+
+// Query the database for the requested information and print the results.
+// If the query fails, exit the program with an error.
+func Query(ctx context.Context, id int64) {
+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+
+ var name string
+ err := pool.QueryRowContext(ctx, "select name from m.t").Scan(&name)
+ if err != nil {
+ log.Fatal("unable to execute search query", err)
+ }
+ log.Println("name=", name)
+}
+
diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.py b/tests/connectorTest/odbcTest/nanosupport/odbc.py
new file mode 100644
index 0000000000000000000000000000000000000000..cee0cf1a13f6360790de368637e2b6a05de3564f
--- /dev/null
+++ b/tests/connectorTest/odbcTest/nanosupport/odbc.py
@@ -0,0 +1,115 @@
+import pyodbc
+import argparse
+import sys
+
+parser = argparse.ArgumentParser(description='Access TDengine via ODBC.')
+parser.add_argument('--DSN', help='DSN to use')
+parser.add_argument('--UID', help='UID to use')
+parser.add_argument('--PWD', help='PWD to use')
+parser.add_argument('--Server', help='Server to use')
+parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use')
+
+args = parser.parse_args()
+
+a = 'DSN=%s'%args.DSN if args.DSN else None
+b = 'UID=%s'%args.UID if args.UID else None
+c = 'PWD=%s'%args.PWD if args.PWD else None
+d = 'Server=%s'%args.Server if args.Server else None
+conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None
+conn_str = conn_str if conn_str else args.C
+if not conn_str:
+ parser.print_help(file=sys.stderr)
+ exit()
+
+print('connecting: [%s]' % conn_str)
+cnxn = pyodbc.connect(conn_str, autocommit=True)
+cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8')
+
+cursor = cnxn.cursor()
+cursor.execute("drop database if exists db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create database db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129")
+##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("""
+INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?)
+""",
+"2020-12-12 00:00:00",
+'true',
+'-127',
+'-32767',
+'-2147483647',
+'-9223372036854775807',
+'-1.23e10',
+'-11.23e6',
+'abcdefghij'.encode('utf-8'),
+"人啊大发测试及abc")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("drop database if exists db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create database db");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))");
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)")
+cursor.close()
+
+cursor = cnxn.cursor()
+cursor.execute("select * from db.v")
+cursor.close()
+
+params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'),
+ ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'),
+ ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'),
+ ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ]
+cursor = cnxn.cursor()
+cursor.fast_executemany = True
+print('py:...................')
+cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params)
+print('py:...................')
+cursor.close()
+
+## cursor = cnxn.cursor()
+## cursor.execute("SELECT * from db.v where v1 > ?", 4)
+## row = cursor.fetchone()
+## while row:
+## print(row)
+## row = cursor.fetchone()
+## cursor.close()
+##
+## cursor = cnxn.cursor()
+## cursor.execute("SELECT * from db.v where v1 > ?", '5')
+## row = cursor.fetchone()
+## while row:
+## print(row)
+## row = cursor.fetchone()
+## cursor.close()
+
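The script above exercises TDengine's ODBC bridge end to end: it assembles a connection string from `--DSN`/`--UID`/`--PWD`/`--Server` (or accepts one verbatim through `-C`), then walks through typed tables, positional parameter binding, and batched inserts via `fast_executemany`. A minimal sketch of the same access pattern — assuming a configured ODBC driver and a DSN named `TAOS_DSN`, both placeholders rather than part of this patch:

```python
import pyodbc

# Connect through the ODBC driver manager; autocommit mirrors the test above.
cnxn = pyodbc.connect('DSN=TAOS_DSN;UID=root;PWD=taosdata', autocommit=True)
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8')

cursor = cnxn.cursor()
cursor.execute("create database if not exists demo")
cursor.execute("create table if not exists demo.t (ts timestamp, v int)")
# Positional '?' markers are bound left to right, as in the test script.
cursor.execute("insert into demo.t values (?, ?)", "2020-10-13 06:44:00.123", 42)
for row in cursor.execute("select * from demo.t"):
    print(row)
cursor.close()
cnxn.close()
```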
diff --git a/tests/examples/C#/taosdemo/Dockerfile b/tests/examples/C#/taosdemo/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4eefc6c75248b1e1e1d6daf305386cca5b11e606
--- /dev/null
+++ b/tests/examples/C#/taosdemo/Dockerfile
@@ -0,0 +1,24 @@
+FROM tdengine/tdengine-beta:latest
+
+ENV DEBIAN_FRONTEND=noninteractive
+ARG MIRROR=archive.ubuntu.com
+RUN sed -Ei 's/\w+\.ubuntu\.com/'${MIRROR}'/' /etc/apt/sources.list && apt-get update && apt-get install -y mono-devel
+RUN apt-get install wget -y \
+ && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
+ && dpkg -i packages-microsoft-prod.deb \
+ && rm packages-microsoft-prod.deb \
+ && apt-get update && apt-get install -y dotnet-sdk-5.0
+COPY ./*.cs *.csproj /tmp/
+WORKDIR /tmp/
+RUN dotnet build -c Release && cp bin/Release/net5.0/taosdemo bin/Release/net5.0/taosdemo.* /usr/local/bin/ && rm -rf /tmp/*
+
+FROM tdengine/tdengine-beta:latest
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install wget -y \
+ && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
+ && dpkg -i packages-microsoft-prod.deb \
+ && rm packages-microsoft-prod.deb \
+ && apt-get update && apt-get install -y dotnet-runtime-5.0
+COPY --from=0 /usr/local/bin/taosdemo* /usr/local/bin/
+CMD ["/usr/local/bin/taosdemo"]
diff --git a/tests/examples/C#/taosdemo/README.md b/tests/examples/C#/taosdemo/README.md
index 2d125fb140076c46c9abc4c60db330b28b494802..3cba3529bf513e2bf3d4ab0c169e7f3d03b2e6a8 100644
--- a/tests/examples/C#/taosdemo/README.md
+++ b/tests/examples/C#/taosdemo/README.md
@@ -1,13 +1,41 @@
+# C# Taosdemo
+
+## For Mono
+
install build environment
-===
+
+```sh
yum/apt install mono-complete
+```
-build C# version taosdemo
-===
+Build the C# version of taosdemo.
+
+```sh
mcs -out:taosdemo *.cs
+./taosdemo --help
+```
+
+## For DotNet
+
+Install the dotnet environment.
+
+```sh
+wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
+ && dpkg -i packages-microsoft-prod.deb \
+ && rm packages-microsoft-prod.deb \
+ && apt-get update && apt-get install -y dotnet-sdk-5.0
+```
+
+Build DotNet version taosdemo.
+
+```sh
+dotnet build -c Release
+./bin/Release/net5.0/taosdemo --help
+```
+
+## Usage
-run C# version taosdemo
-===
+```
Usage: mono taosdemo.exe [OPTION...]
--help Show usage.
@@ -34,3 +62,4 @@ Usage: mono taosdemo.exe [OPTION...]
-v Print verbose output
-g Print debug output
-y Skip read key for continous test, default is not skip
+```
diff --git a/tests/examples/C#/taosdemo/taosdemo.csproj b/tests/examples/C#/taosdemo/taosdemo.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..15ec155d45e34aae7276fe596c177619dfddd3e9
--- /dev/null
+++ b/tests/examples/C#/taosdemo/taosdemo.csproj
@@ -0,0 +1,9 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net5.0</TargetFramework>
+    <GenerateDocumentationFile>false</GenerateDocumentationFile>
+  </PropertyGroup>
+
+</Project>
diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml
index fed00c147b87621c70d60ea206b06f1b0f3e8d8f..8cf0356721f8ffd568e87fa4a77c86eb0f90a62b 100644
--- a/tests/examples/JDBC/JDBCDemo/pom.xml
+++ b/tests/examples/JDBC/JDBCDemo/pom.xml
@@ -17,7 +17,7 @@
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
-            <version>2.0.31</version>
+            <version>2.0.34</version>
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
index d4ea5f919d2882e4f82b817380172eff20d7c611..5bc23403087578c0791b0a5e6fca74a47aad8184 100644
--- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
@@ -7,6 +7,9 @@ public class JdbcDemo {
private static String host;
private static final String dbName = "test";
private static final String tbName = "weather";
+ private static final String user = "root";
+ private static final String password = "taosdata";
+
private Connection connection;
public static void main(String[] args) {
@@ -30,10 +33,9 @@ public class JdbcDemo {
}
private void init() {
- final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password;
// get connection
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty("charset", "UTF-8");
properties.setProperty("locale", "en_US.UTF-8");
@@ -42,8 +44,7 @@ public class JdbcDemo {
connection = DriverManager.getConnection(url, properties);
if (connection != null)
System.out.println("[ OK ] Connection established.");
- } catch (ClassNotFoundException | SQLException e) {
- System.out.println("[ ERROR! ] Connection establish failed.");
+ } catch (SQLException e) {
e.printStackTrace();
}
}
@@ -74,7 +75,7 @@ public class JdbcDemo {
}
private void select() {
- final String sql = "select * from "+ dbName + "." + tbName;
+ final String sql = "select * from " + dbName + "." + tbName;
executeQuery(sql);
}
@@ -89,8 +90,6 @@ public class JdbcDemo {
}
}
- /************************************************************************/
-
private void executeQuery(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
@@ -117,7 +116,6 @@ public class JdbcDemo {
}
}
-
private void printSql(String sql, boolean succeed, long cost) {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
@@ -132,7 +130,6 @@ public class JdbcDemo {
long end = System.currentTimeMillis();
printSql(sql, false, (end - start));
e.printStackTrace();
-
}
}
@@ -141,5 +138,4 @@ public class JdbcDemo {
System.exit(0);
}
-
-}
\ No newline at end of file
+}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java
index 5bf980f6d84e53438573812aa9f07d8d463f08c3..d89476b8ca718dab24202e2320e842366533a763 100644
--- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java
@@ -4,14 +4,15 @@ import java.sql.*;
import java.util.Properties;
public class JdbcRestfulDemo {
- private static final String host = "127.0.0.1";
+ private static final String host = "localhost";
+ private static final String dbname = "test";
+ private static final String user = "root";
+ private static final String password = "taosdata";
public static void main(String[] args) {
try {
- // load JDBC-restful driver
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
// use port 6041 in url when use JDBC-restful
- String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
+ String url = "jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password;
Properties properties = new Properties();
properties.setProperty("charset", "UTF-8");
@@ -21,12 +22,12 @@ public class JdbcRestfulDemo {
Connection conn = DriverManager.getConnection(url, properties);
Statement stmt = conn.createStatement();
- stmt.execute("drop database if exists restful_test");
- stmt.execute("create database if not exists restful_test");
- stmt.execute("use restful_test");
- stmt.execute("create table restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))");
- stmt.executeUpdate("insert into t1 using restful_test.weather tags('北京') values(now, 18.2)");
- ResultSet rs = stmt.executeQuery("select * from restful_test.weather");
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname);
+ stmt.execute("use " + dbname);
+ stmt.execute("create table " + dbname + ".weather(ts timestamp, temperature float) tags(location nchar(64))");
+ stmt.executeUpdate("insert into t1 using " + dbname + ".weather tags('北京') values(now, 18.2)");
+ ResultSet rs = stmt.executeQuery("select * from " + dbname + ".weather");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
@@ -38,8 +39,6 @@ public class JdbcRestfulDemo {
rs.close();
stmt.close();
conn.close();
- } catch (ClassNotFoundException e) {
- e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java
index def4c649027034028d222bfedb71e37d82b99380..4c499b0b3abb518b48b222eca9bbbcb388bd2008 100644
--- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java
+++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SubscribeDemo.java
@@ -34,9 +34,8 @@ public class SubscribeDemo {
System.out.println(usage);
return;
}
- /*********************************************************************************************/
+
try {
- Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
diff --git a/tests/examples/JDBC/springbootdemo/pom.xml b/tests/examples/JDBC/springbootdemo/pom.xml
index 6c83718896cc2e5716f599ba08212d3dc8292133..9126813b67e71691692109920f891a6fb4cc5ab5 100644
--- a/tests/examples/JDBC/springbootdemo/pom.xml
+++ b/tests/examples/JDBC/springbootdemo/pom.xml
@@ -60,12 +60,15 @@
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-aop</artifactId>
+        </dependency>
+
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
-            <version>2.0.28</version>
-
-
+            <version>2.0.34</version>
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
index fa10f3b0929e4c25c1379f489f73fc12ad9c1917..53edaa5796cccc7e4a4f274048c83a9ca7bbc7bb 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
@@ -4,7 +4,7 @@ import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
-@MapperScan(basePackages = {"com.taosdata.example.springbootdemo.dao"})
+@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
@SpringBootApplication
public class SpringbootdemoApplication {
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java
index cf14f5d84ace6348f38709ac3d3668ee8d2a0797..ed720fe6c02dd3a7eba6e645ea1e76d704c04d0c 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/controller/WeatherController.java
@@ -15,35 +15,21 @@ public class WeatherController {
@Autowired
private WeatherService weatherService;
- /**
- * create database and table
- *
- * @return
- */
+ @GetMapping("/lastOne")
+ public Weather lastOne() {
+ return weatherService.lastOne();
+ }
+
@GetMapping("/init")
public int init() {
return weatherService.init();
}
- /**
- * Pagination Query
- *
- * @param limit
- * @param offset
- * @return
- */
@GetMapping("/{limit}/{offset}")
public List<Weather> queryWeather(@PathVariable Long limit, @PathVariable Long offset) {
return weatherService.query(limit, offset);
}
- /**
- * upload single weather info
- *
- * @param temperature
- * @param humidity
- * @return
- */
@PostMapping("/{temperature}/{humidity}")
public int saveWeather(@PathVariable float temperature, @PathVariable float humidity) {
return weatherService.save(temperature, humidity);
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java
index ad6733558a9d548be196cf8c9c0c63dc96227b39..d9202b45b4cc3dddf8e5a082ac339c1f88d4ec01 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.java
@@ -8,6 +8,8 @@ import java.util.Map;
public interface WeatherMapper {
+    Map<String, Object> lastOne();
+
void dropDB();
void createDB();
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
index 2d3e0540650f35c1018992795ac33fb6cb7c4837..91938ca24e3cf9c3e0f2895cf40f214d484c55d5 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
@@ -9,20 +9,48 @@
+    <select id="lastOne" resultType="java.util.Map">
+        select last_row(*), location, groupid
+        from test.weather
+    </select>
+
    <update id="dropDB">
-        drop database if exists test
+        drop
+        database if exists test
    </update>
    <update id="createDB">
-        create database if not exists test
+        create
+        database if not exists test
    </update>
- create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int)
+ create table if not exists test.weather
+ (
+ ts
+ timestamp,
+ temperature
+ float,
+ humidity
+ float,
+ note
+ binary
+ (
+ 64
+ )) tags
+ (
+ location nchar
+ (
+ 64
+ ), groupId int)
- create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId})
+ create table if not exists test.t#{groupId} using test.weather tags
+ (
+ #{location},
+ #{groupId}
+ )
@@ -36,25 +64,29 @@
- insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity})
+ insert into test.t#{groupId} (ts, temperature, humidity, note)
+ values (#{ts}, ${temperature}, ${humidity}, #{note})
- select tbname from test.weather
+ select tbname
+ from test.weather
- select count(*) from test.weather
+ select count(*)
+ from test.weather
-
-
-
+
+
+
- select avg(temperature), avg(humidity)from test.weather interval(1m)
+ select avg(temperature), avg(humidity)
+ from test.weather interval(1m)
\ No newline at end of file
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java
index c11b9a6f50655788d1e35eb9607a101d2d06c872..e4238127bd32b0f6ad21a514f3a1f07f6069b6d5 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/domain/Weather.java
@@ -11,6 +11,7 @@ public class Weather {
private Float temperature;
private Float humidity;
private String location;
+ private String note;
private int groupId;
public Weather() {
@@ -61,4 +62,12 @@ public class Weather {
public void setGroupId(int groupId) {
this.groupId = groupId;
}
+
+ public String getNote() {
+ return note;
+ }
+
+ public void setNote(String note) {
+ this.note = note;
+ }
}
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java
index 26d09c7d128015739cdb0a87956affa4910b4b4e..2264b200afc3e0c2b7dd8e496e607649f940581d 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/service/WeatherService.java
@@ -29,6 +29,7 @@ public class WeatherService {
Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100));
weather.setLocation(locations[random.nextInt(locations.length)]);
weather.setGroupId(i % locations.length);
+ weather.setNote("note-" + i);
weatherMapper.createTable(weather);
count += weatherMapper.insert(weather);
}
@@ -58,4 +59,21 @@ public class WeatherService {
public List<Weather> avg() {
return weatherMapper.avg();
}
+
+ public Weather lastOne() {
+ Map<String, Object> result = weatherMapper.lastOne();
+
+ long ts = (long) result.get("ts");
+ float temperature = (float) result.get("temperature");
+ float humidity = (float) result.get("humidity");
+ String note = (String) result.get("note");
+ int groupId = (int) result.get("groupid");
+ String location = (String) result.get("location");
+
+ Weather weather = new Weather(new Timestamp(ts), temperature, humidity);
+ weather.setNote(note);
+ weather.setGroupId(groupId);
+ weather.setLocation(location);
+ return weather;
+ }
}
diff --git a/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java
new file mode 100644
index 0000000000000000000000000000000000000000..80dad1bd7d669ba6b912c7e5fa816c29b7e37c87
--- /dev/null
+++ b/tests/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/util/TaosAspect.java
@@ -0,0 +1,36 @@
+package com.taosdata.example.springbootdemo.util;
+
+import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.annotation.Around;
+import org.aspectj.lang.annotation.Aspect;
+import org.springframework.stereotype.Component;
+
+import java.sql.Timestamp;
+import java.util.Map;
+
+@Aspect
+@Component
+public class TaosAspect {
+
+ @Around("execution(java.util.Map com.taosdata.example.springbootdemo.dao.*.*(..))")
+ public Object handleType(ProceedingJoinPoint joinPoint) {
+ Map<String, Object> result = null;
+ try {
+ result = (Map<String, Object>) joinPoint.proceed();
+ for (String key : result.keySet()) {
+ Object obj = result.get(key);
+ if (obj instanceof byte[]) {
+ obj = new String((byte[]) obj);
+ result.put(key, obj);
+ }
+ if (obj instanceof Timestamp) {
+ obj = ((Timestamp) obj).getTime();
+ result.put(key, obj);
+ }
+ }
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ return result;
+ }
+}
diff --git a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
index 4d7e64d10576388827502a459df9e68da2721dbb..06daa81bbb06450d99ab3f6e640c9795c0ad5d2e 100644
--- a/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
+++ b/tests/examples/JDBC/springbootdemo/src/main/resources/application.properties
@@ -1,22 +1,20 @@
# datasource config - JDBC-JNI
#spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
-#spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+#spring.datasource.url=jdbc:TAOS://localhost:6030/?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
#spring.datasource.username=root
#spring.datasource.password=taosdata
-
# datasource config - JDBC-RESTful
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
-spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
spring.datasource.username=root
spring.datasource.password=taosdata
-
spring.datasource.druid.initial-size=5
spring.datasource.druid.min-idle=5
spring.datasource.druid.max-active=5
spring.datasource.druid.max-wait=30000
spring.datasource.druid.validation-query=select server_status();
-
+spring.aop.auto=true
+spring.aop.proxy-target-class=true
#mybatis
mybatis.mapper-locations=classpath:mapper/*.xml
-
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
diff --git a/tests/examples/c/-g b/tests/examples/c/-g
new file mode 100755
index 0000000000000000000000000000000000000000..3909909e8fe531a7b6d35ca315b8277e7270bb02
Binary files /dev/null and b/tests/examples/c/-g differ
diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c
index 01169715f3e8b5b9d6e212b4b317ecca5fa4dbcd..03123afb3584ea94417c88e55edd9f8e232b0fe9 100644
--- a/tests/examples/c/apitest.c
+++ b/tests/examples/c/apitest.c
@@ -1,12 +1,16 @@
// sample code to verify all TDengine API
// to compile: gcc -o apitest apitest.c -ltaos
+#include "taoserror.h"
+#include "cJSON.h"
+
#include <taos.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+
static void prepare_data(TAOS* taos) {
TAOS_RES *result;
result = taos_query(taos, "drop database if exists test;");
@@ -1014,6 +1018,919 @@ int32_t verify_schema_less(TAOS* taos) {
return (code);
}
+void verify_telnet_insert(TAOS* taos) {
+ TAOS_RES *result;
+
+ result = taos_query(taos, "drop database if exists db;");
+ taos_free_result(result);
+ usleep(100000);
+ result = taos_query(taos, "create database db precision 'ms';");
+ taos_free_result(result);
+ usleep(100000);
+
+ (void)taos_select_db(taos, "db");
+ int32_t code = 0;
+
+ /* metric */
+ char* lines0[] = {
+ "stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
+ "stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
+ "stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
+ };
+ code = taos_insert_telnet_lines(taos, lines0, 3);
+ if (code) {
+ printf("lines0 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ /* timestamp */
+ char* lines1[] = {
+ "stb1 1626006833s 1i8 host=\"host0\"",
+ "stb1 1626006833639000000ns 2i8 host=\"host0\"",
+ "stb1 1626006833640000us 3i8 host=\"host0\"",
+ "stb1 1626006833641123 4i8 host=\"host0\"",
+ "stb1 1626006833651ms 5i8 host=\"host0\"",
+ "stb1 0 6i8 host=\"host0\"",
+ };
+ code = taos_insert_telnet_lines(taos, lines1, 6);
+ if (code) {
+ printf("lines1 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ /* metric value */
+ //tinyint
+ char* lines2_0[] = {
+ "stb2_0 1626006833651ms -127i8 host=\"host0\"",
+ "stb2_0 1626006833652ms 127i8 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_0, 2);
+ if (code) {
+ printf("lines2_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //smallint
+ char* lines2_1[] = {
+ "stb2_1 1626006833651ms -32767i16 host=\"host0\"",
+ "stb2_1 1626006833652ms 32767i16 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_1, 2);
+ if (code) {
+ printf("lines2_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //int
+ char* lines2_2[] = {
+ "stb2_2 1626006833651ms -2147483647i32 host=\"host0\"",
+ "stb2_2 1626006833652ms 2147483647i32 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_2, 2);
+ if (code) {
+ printf("lines2_2 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //bigint
+ char* lines2_3[] = {
+ "stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"",
+ "stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_3, 2);
+ if (code) {
+ printf("lines2_3 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //float
+ char* lines2_4[] = {
+ "stb2_4 1626006833610ms 3f32 host=\"host0\"",
+ "stb2_4 1626006833620ms -3f32 host=\"host0\"",
+ "stb2_4 1626006833630ms 3.4f32 host=\"host0\"",
+ "stb2_4 1626006833640ms -3.4f32 host=\"host0\"",
+ "stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"",
+ "stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"",
+ "stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"",
+ "stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"",
+ "stb2_4 1626006833690ms 3.15 host=\"host0\"",
+ "stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"",
+ "stb2_4 1626006833710ms -3.4E38f32 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_4, 11);
+ if (code) {
+ printf("lines2_4 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //double
+ char* lines2_5[] = {
+ "stb2_5 1626006833610ms 3f64 host=\"host0\"",
+ "stb2_5 1626006833620ms -3f64 host=\"host0\"",
+ "stb2_5 1626006833630ms 3.4f64 host=\"host0\"",
+ "stb2_5 1626006833640ms -3.4f64 host=\"host0\"",
+ "stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"",
+ "stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"",
+ "stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"",
+ "stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"",
+ "stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"",
+ "stb2_5 1626006833700ms -1.7E308f64 host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_5, 10);
+ if (code) {
+ printf("lines2_5 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //bool
+ char* lines2_6[] = {
+ "stb2_6 1626006833610ms t host=\"host0\"",
+ "stb2_6 1626006833620ms T host=\"host0\"",
+ "stb2_6 1626006833630ms true host=\"host0\"",
+ "stb2_6 1626006833640ms True host=\"host0\"",
+ "stb2_6 1626006833650ms TRUE host=\"host0\"",
+ "stb2_6 1626006833660ms f host=\"host0\"",
+ "stb2_6 1626006833670ms F host=\"host0\"",
+ "stb2_6 1626006833680ms false host=\"host0\"",
+ "stb2_6 1626006833690ms False host=\"host0\"",
+ "stb2_6 1626006833700ms FALSE host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_6, 10);
+ if (code) {
+ printf("lines2_6 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //binary
+ char* lines2_7[] = {
+ "stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"",
+ "stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"",
+ "stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\""
+ };
+ code = taos_insert_telnet_lines(taos, lines2_7, 3);
+ if (code) {
+ printf("lines2_7 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //nchar
+ char* lines2_8[] = {
+ "stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"",
+ "stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\"",
+ };
+ code = taos_insert_telnet_lines(taos, lines2_8, 2);
+ if (code) {
+ printf("lines2_8 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ /* tags */
+ //tag value types
+ char* lines3_0[] = {
+ "stb3_0 1626006833610ms 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"",
+ "stb3_0 1626006833610ms 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\""
+ };
+ code = taos_insert_telnet_lines(taos, lines3_0, 2);
+ if (code) {
+ printf("lines3_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ //tag ID as child table name
+ char* lines3_1[] = {
+ "stb3_1 1626006833610ms 1 id=\"child_table1\" host=\"host1\"",
+ "stb3_1 1626006833610ms 2 host=\"host2\" iD=\"child_table2\"",
+ "stb3_1 1626006833610ms 3 ID=\"child_table3\" host=\"host3\""
+ };
+ code = taos_insert_telnet_lines(taos, lines3_1, 3);
+ if (code) {
+ printf("lines3_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ return;
+}
+
+void verify_json_insert(TAOS* taos) {
+ TAOS_RES *result;
+
+ result = taos_query(taos, "drop database if exists db;");
+ taos_free_result(result);
+ usleep(100000);
+ result = taos_query(taos, "create database db precision 'ms';");
+ taos_free_result(result);
+ usleep(100000);
+
+ (void)taos_select_db(taos, "db");
+ int32_t code = 0;
+
+ char *message =
+ "{ \
+ \"metric\":\"cpu_load_0\", \
+ \"timestamp\": 1626006833610123, \
+ \"value\": 55.5, \
+ \"tags\": \
+ { \
+ \"host\": \"ubuntu\", \
+ \"interface1\": \"eth0\", \
+ \"Id\": \"tb0\" \
+ } \
+ }";
+
+ code = taos_insert_json_payload(taos, message);
+ if (code) {
+ printf("payload_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ char *message1 =
+ "[ \
+ { \
+ \"metric\":\"cpu_load_1\", \
+ \"timestamp\": 1626006833610123, \
+ \"value\": 55.5, \
+ \"tags\": \
+ { \
+ \"host\": \"ubuntu\", \
+ \"interface\": \"eth1\", \
+ \"Id\": \"tb1\" \
+ } \
+ }, \
+ { \
+ \"metric\":\"cpu_load_2\", \
+ \"timestamp\": 1626006833610123, \
+ \"value\": 55.5, \
+ \"tags\": \
+ { \
+ \"host\": \"ubuntu\", \
+ \"interface\": \"eth2\", \
+ \"Id\": \"tb2\" \
+ } \
+ } \
+ ]";
+
+ code = taos_insert_json_payload(taos, message1);
+ if (code) {
+ printf("payload_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+ char *message2 =
+ "[ \
+ { \
+ \"metric\":\"cpu_load_3\", \
+ \"timestamp\": \
+ { \
+ \"value\": 1626006833610123, \
+ \"type\": \"us\" \
+ }, \
+ \"value\": \
+ { \
+ \"value\": 55, \
+ \"type\": \"int\" \
+ }, \
+ \"tags\": \
+ { \
+ \"host\": \
+ { \
+ \"value\": \"ubuntu\", \
+ \"type\": \"binary\" \
+ }, \
+ \"interface\": \
+ { \
+ \"value\": \"eth3\", \
+ \"type\": \"nchar\" \
+ }, \
+ \"ID\": \"tb3\", \
+ \"port\": \
+ { \
+ \"value\": 4040, \
+ \"type\": \"int\" \
+ } \
+ } \
+ }, \
+ { \
+ \"metric\":\"cpu_load_4\", \
+ \"timestamp\": 1626006833610123, \
+ \"value\": 66.6, \
+ \"tags\": \
+ { \
+ \"host\": \"ubuntu\", \
+ \"interface\": \"eth4\", \
+ \"Id\": \"tb4\" \
+ } \
+ } \
+ ]";
+ code = taos_insert_json_payload(taos, message2);
+ if (code) {
+ printf("payload_2 code: %d, %s.\n", code, tstrerror(code));
+ }
+
+
+ cJSON *payload, *tags;
+ char *payload_str;
+
+ /* Default format */
+ //number
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_0");
+ cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123);
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //true
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_1");
+ cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123);
+ cJSON_AddTrueToObject(payload, "value");
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //false
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_2");
+ cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123);
+ cJSON_AddFalseToObject(payload, "value");
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_2 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //string
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_3");
+ cJSON_AddNumberToObject(payload, "timestamp", 1626006833610123);
+ cJSON_AddStringToObject(payload, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_3 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //timestamp 0 -> current time
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_4");
+ cJSON_AddNumberToObject(payload, "timestamp", 0);
+ cJSON_AddNumberToObject(payload, "value", 123);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_4 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //ID
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb0_5");
+ cJSON_AddNumberToObject(payload, "timestamp", 0);
+ cJSON_AddNumberToObject(payload, "value", 123);
+ tags = cJSON_CreateObject();
+ cJSON_AddStringToObject(tags, "ID", "tb0_5");
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddStringToObject(tags, "iD", "tb000");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddStringToObject(tags, "id", "tb555");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload0_5 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ /* Nested format */
+ //timestamp
+ cJSON *timestamp;
+ //seconds
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb1_0");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload1_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //milliseconds
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb1_1");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833610);
+ cJSON_AddStringToObject(timestamp, "type", "ms");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload1_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //microseconds
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb1_2");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833610123);
+ cJSON_AddStringToObject(timestamp, "type", "us");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload1_2 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //nanoseconds
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb1_3");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833610123321);
+ cJSON_AddStringToObject(timestamp, "type", "ns");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload1_3 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //now
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb1_4");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 0);
+ cJSON_AddStringToObject(timestamp, "type", "ns");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ cJSON_AddNumberToObject(payload, "value", 10);
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload1_4 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //metric value
+ cJSON *metric_val;
+ //bool
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_0");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddTrueToObject(metric_val, "value");
+ cJSON_AddStringToObject(metric_val, "type", "bool");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //tinyint
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_1");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 127);
+ cJSON_AddStringToObject(metric_val, "type", "tinyint");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_1 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //smallint
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_2");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 32767);
+ cJSON_AddStringToObject(metric_val, "type", "smallint");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_2 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //int
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_3");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 2147483647);
+ cJSON_AddStringToObject(metric_val, "type", "int");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_3 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //bigint
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_4");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 9223372036854775807);
+ cJSON_AddStringToObject(metric_val, "type", "bigint");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_4 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //float
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_5");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 11.12345);
+ cJSON_AddStringToObject(metric_val, "type", "float");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_5 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //double
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_6");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddNumberToObject(metric_val, "value", 22.123456789);
+ cJSON_AddStringToObject(metric_val, "type", "double");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_6 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //binary
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_7");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddStringToObject(metric_val, "value", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddStringToObject(metric_val, "type", "binary");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_7 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //nchar
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb2_8");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddStringToObject(metric_val, "value", "你好");
+ cJSON_AddStringToObject(metric_val, "type", "nchar");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tags, "t1");
+ cJSON_AddFalseToObject(tags, "t2");
+ cJSON_AddNumberToObject(tags, "t3", 10);
+ cJSON_AddStringToObject(tags, "t4", "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>");
+ cJSON_AddItemToObject(payload, "tags", tags);
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload2_8 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+
+ //tag value
+ cJSON *tag;
+
+ payload = cJSON_CreateObject();
+ cJSON_AddStringToObject(payload, "metric", "stb3_0");
+
+ timestamp = cJSON_CreateObject();
+ cJSON_AddNumberToObject(timestamp, "value", 1626006833);
+ cJSON_AddStringToObject(timestamp, "type", "s");
+ cJSON_AddItemToObject(payload, "timestamp", timestamp);
+
+ metric_val = cJSON_CreateObject();
+ cJSON_AddStringToObject(metric_val, "value", "hello");
+ cJSON_AddStringToObject(metric_val, "type", "nchar");
+ cJSON_AddItemToObject(payload, "value", metric_val);
+
+ tags = cJSON_CreateObject();
+
+ tag = cJSON_CreateObject();
+ cJSON_AddTrueToObject(tag, "value");
+ cJSON_AddStringToObject(tag, "type", "bool");
+ cJSON_AddItemToObject(tags, "t1", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddFalseToObject(tag, "value");
+ cJSON_AddStringToObject(tag, "type", "bool");
+ cJSON_AddItemToObject(tags, "t2", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 127);
+ cJSON_AddStringToObject(tag, "type", "tinyint");
+ cJSON_AddItemToObject(tags, "t3", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 32767);
+ cJSON_AddStringToObject(tag, "type", "smallint");
+ cJSON_AddItemToObject(tags, "t4", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 2147483647);
+ cJSON_AddStringToObject(tag, "type", "int");
+ cJSON_AddItemToObject(tags, "t5", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 9223372036854775807);
+ cJSON_AddStringToObject(tag, "type", "bigint");
+ cJSON_AddItemToObject(tags, "t6", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 11.12345);
+ cJSON_AddStringToObject(tag, "type", "float");
+ cJSON_AddItemToObject(tags, "t7", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddNumberToObject(tag, "value", 22.1234567890);
+ cJSON_AddStringToObject(tag, "type", "double");
+ cJSON_AddItemToObject(tags, "t8", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddStringToObject(tag, "value", "binary_val");
+ cJSON_AddStringToObject(tag, "type", "binary");
+ cJSON_AddItemToObject(tags, "t9", tag);
+
+ tag = cJSON_CreateObject();
+ cJSON_AddStringToObject(tag, "value", "你好");
+ cJSON_AddStringToObject(tag, "type", "nchar");
+ cJSON_AddItemToObject(tags, "t10", tag);
+
+ cJSON_AddItemToObject(payload, "tags", tags);
+
+ payload_str = cJSON_Print(payload);
+ //printf("%s\n", payload_str);
+
+ code = taos_insert_json_payload(taos, payload_str);
+ if (code) {
+ printf("payload3_0 code: %d, %s.\n", code, tstrerror(code));
+ }
+ free(payload_str);
+ cJSON_Delete(payload);
+}
+
int main(int argc, char *argv[]) {
const char* host = "127.0.0.1";
const char* user = "root";
@@ -1034,6 +1951,11 @@ int main(int argc, char *argv[]) {
printf("************ verify schema-less *************\n");
verify_schema_less(taos);
+ printf("************ verify telnet-insert *************\n");
+ verify_telnet_insert(taos);
+
+ printf("************ verify json-insert *************\n");
+ verify_json_insert(taos);
printf("************ verify query *************\n");
verify_query(taos);
@@ -1051,7 +1973,7 @@ int main(int argc, char *argv[]) {
verify_prepare2(taos);
printf("************ verify prepare3 *************\n");
verify_prepare3(taos);
-
+
printf("************ verify stream *************\n");
verify_stream(taos);
printf("done\n");
diff --git a/tests/examples/c/clientcfgtest-taosd.c b/tests/examples/c/clientcfgtest-taosd.c
new file mode 100644
index 0000000000000000000000000000000000000000..fbfbd8935a34481c23e806bbe461882ed9a10437
--- /dev/null
+++ b/tests/examples/c/clientcfgtest-taosd.c
@@ -0,0 +1,33 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <string.h>
+#include <taos.h>
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tulog.h"
+#include "tsocket.h"
+#include "tutil.h"
+extern SGlobalCfg *taosGetConfigOption(const char *option);
+
+int main(int argc, char *argv[]) {
+  printf("start to test\n");
+
+  // case 1: set the "cache" option through taos_set_config
+  const char config1[128] = "{\"cache\":\"4\"}";  // the option to be configured
+  taos_set_config(config1);                       // apply the configuration
+
+  SGlobalCfg *cfg1 = taosGetConfigOption("cache");  // check the result
+  if (cfg1->cfgStatus == 3)  // cfgStatus 3 means the configuration took effect
+    printf("config cache to '4' success!\n");
+  else
+    printf("config cache failure!\n");
+  return 0;
+}
diff --git a/tests/examples/c/clientcfgtest-wrongjson.c b/tests/examples/c/clientcfgtest-wrongjson.c
new file mode 100644
index 0000000000000000000000000000000000000000..eecb5dae6d27c213731afdea005af3fc265dd47f
--- /dev/null
+++ b/tests/examples/c/clientcfgtest-wrongjson.c
@@ -0,0 +1,62 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <string.h>
+#include <taos.h>
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tulog.h"
+#include "tsocket.h"
+#include "tutil.h"
+extern SGlobalCfg *taosGetConfigOption(const char *option);
+
+int main(int argc, char *argv[]) {
+  printf("start to test\n");
+
+  // case 1: configure with deliberately malformed JSON (the stray \135 octal
+  // escape injects a bare ']'); the result should be failure
+  const char config1[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\135\"}";
+  taos_set_config(config1);  // apply the configuration
+
+  SGlobalCfg *cfg1 = taosGetConfigOption("firstEp");  // check the result
+  if (cfg1->cfgStatus == 3)  // cfgStatus 3 means the configuration took effect
+    printf("config firstEp 'BCC-2:6030' success!\n");
+  else
+    printf("config firstEp failure!\n");
+
+  SGlobalCfg *cfg2 = taosGetConfigOption("debugFlag");
+  if (cfg2->cfgStatus == 3)
+    printf("config debugFlag '135' success!\n");
+  else
+    printf("config debugFlag failure!\n");
+
+  // case 2: repair the JSON and try again; the result should be success
+  const char config2[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\"135\"}";
+  taos_set_config(config2);
+
+  SGlobalCfg *cfg3 = taosGetConfigOption("firstEp");
+  if (cfg3->cfgStatus == 3)
+    printf("config firstEp 'BCC-2:6030' success!\n");
+  else
+    printf("config firstEp failure!\n");
+
+  SGlobalCfg *cfg4 = taosGetConfigOption("debugFlag");
+  if (cfg4->cfgStatus == 3)
+    printf("config debugFlag '135' success!\n");
+  else
+    printf("config debugFlag failure!\n");
+  return 0;
+}
diff --git a/tests/examples/c/clientcfgtest-wrongtype.c b/tests/examples/c/clientcfgtest-wrongtype.c
new file mode 100644
index 0000000000000000000000000000000000000000..d88cbeebe8e5114ed4836e77b9494de1cc54aba8
--- /dev/null
+++ b/tests/examples/c/clientcfgtest-wrongtype.c
@@ -0,0 +1,48 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <string.h>
+#include <taos.h>
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tulog.h"
+#include "tsocket.h"
+#include "tutil.h"
+extern SGlobalCfg *taosGetConfigOption(const char *option);
+
+int main(int argc, char *argv[]) {
+  printf("start to test\n");
+
+  // case 1: configure debugFlag with a value that overflows its type;
+  // the result should be failure
+  const char config1[128] = "{\"debugFlag\":\"9999999999999999999999999\"}";
+  taos_set_config(config1);  // apply the configuration
+
+  SGlobalCfg *cfg1 = taosGetConfigOption("debugFlag");  // check the result
+  if (cfg1->cfgStatus == 3)  // cfgStatus 3 means the configuration took effect
+    printf("config debugFlag '9999999999999999999999999' success!\n");
+  else
+    printf("config debugFlag failure!\n");
+
+  // case 2: try again with a valid value; taos_set_config only takes effect
+  // on its first call, so the result should still be failure
+  const char config2[128] = "{\"debugFlag\":\"135\"}";
+  taos_set_config(config2);
+
+  SGlobalCfg *cfg2 = taosGetConfigOption("debugFlag");
+  if (cfg2->cfgStatus == 3)
+    printf("config debugFlag '135' success!\n");
+  else
+    printf("config debugFlag failure!\n");
+  return 0;
+}
diff --git a/tests/examples/c/clientcfgtest-wrongvalue.c b/tests/examples/c/clientcfgtest-wrongvalue.c
new file mode 100644
index 0000000000000000000000000000000000000000..f0d44a47f62696d14844ea12276b74da7d0ff408
--- /dev/null
+++ b/tests/examples/c/clientcfgtest-wrongvalue.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <string.h>
+#include <taos.h>
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tulog.h"
+#include "tsocket.h"
+#include "tutil.h"
+extern SGlobalCfg *taosGetConfigOption(const char *option);
+int main(int argc, char *argv[]) {
+
+ printf("start to test\n");
+
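+ // rpcTimer is validated against an allowed range by the client (an
+ // assumption based on this test's expectations): 0 should be rejected,
+ // 400 accepted.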
+ //case1:
+ //Configure rpcTimer with an invalid value
+ //The expected result is failure
+ const char config1[128] = "{\"rpcTimer\":\"0\"}";  // 0 is below the allowed minimum
+ taos_set_config(config1);  // apply the configuration
+
+ SGlobalCfg *cfg1;
+
+ cfg1 = taosGetConfigOption("rpcTimer");  // check the option result
+ if (cfg1->cfgStatus == 3)  // cfgStatus == 3 means the configuration succeeded
+   printf("config rpcTimer to '0' success!\n");
+ else
+   printf("config rpcTimer failure!\n");
+
+ //case2:
+ //Try again with a valid value
+ const char config2[128] = "{\"rpcTimer\":\"400\"}";  // a value within the allowed range
+ taos_set_config(config2);  // apply the configuration
+
+ SGlobalCfg *cfg2;
+
+ cfg2 = taosGetConfigOption("rpcTimer");  // check the option result
+ if (cfg2->cfgStatus == 3)  // cfgStatus == 3 means the configuration succeeded
+   printf("config rpcTimer '400' success!\n");
+ else
+   printf("config rpcTimer failure!\n");
+ return 0;
+
+}
diff --git a/tests/examples/c/clientcfgtest.c b/tests/examples/c/clientcfgtest.c
new file mode 100644
index 0000000000000000000000000000000000000000..5f8f51cdb1156a25544273fc6419f65b86ea4ecc
--- /dev/null
+++ b/tests/examples/c/clientcfgtest.c
@@ -0,0 +1,55 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <taos.h>
+#include "os.h"
+#include "taosdef.h"
+#include "taoserror.h"
+#include "tconfig.h"
+#include "tglobal.h"
+#include "tulog.h"
+#include "tsocket.h"
+#include "tutil.h"
+extern SGlobalCfg *taosGetConfigOption(const char *option);
+int main(int argc, char *argv[]) {
+
+ printf("start to test\n");
+
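+ // In tconfig.h the cfgStatus values run from TAOS_CFG_CSTATUS_NONE (0) to
+ // TAOS_CFG_CSTATUS_OPTION (3); these tests treat 3, "set by option", as
+ // proof that taos_set_config() applied the value.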
+ //case1:
+ //Configure firstEp and debugFlag; both should succeed
+ const char config1[128] = "{\"firstEp\":\"BCC-2:6030\",\"debugFlag\":\"135\"}";  // the parameters to be configured
+ taos_set_config(config1);  // apply the configuration
+
+ SGlobalCfg *cfg1;
+
+ cfg1 = taosGetConfigOption("firstEp");  // check the option result
+ if (cfg1->cfgStatus == 3)  // cfgStatus == 3 means the configuration succeeded
+   printf("config firstEp 'BCC-2:6030' success!\n");
+ else
+   printf("config firstEp failure!\n");
+
+
+ SGlobalCfg *cfg2;
+
+ cfg2 = taosGetConfigOption("debugFlag");  // check the option result
+ if (cfg2->cfgStatus == 3)  // cfgStatus == 3 means the configuration succeeded
+   printf("config debugFlag '135' success!\n");
+ else
+   printf("config debugFlag failure!\n");
+ //case2:
+ //taos_set_config() is only effective the first time it is called,
+ //so this second call is expected to fail
+ const char config2[128] = "{\"fqdn\":\"BCC-3\"}";  // the parameter to be configured
+ taos_set_config(config2);  // apply the configuration
+
+ SGlobalCfg *cfg3;
+
+ cfg3 = taosGetConfigOption("fqdn");  // check the option result
+ if (cfg3->cfgStatus == 3)  // cfgStatus == 3 means the configuration succeeded
+   printf("config fqdn to 'BCC-3' success!\n");
+ else
+   printf("config fqdn failure!\n");
+ return 0;
+
+}
diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile
index 304623c27af27cd23a301af134647fb3b9746d64..f364eb76fc34ab0975c00dcae2b8348e58b38517 100644
--- a/tests/examples/c/makefile
+++ b/tests/examples/c/makefile
@@ -6,8 +6,8 @@ TARGET=exe
LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \
-Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \
- -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99
-
+ -Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
+ -I../../../deps/cJson/inc
all: $(TARGET)
exe:
@@ -17,6 +17,12 @@ exe:
gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS)
gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS)
gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS)
+ gcc $(CFLAGS) ./clientcfgtest.c -o $(ROOT)clientcfgtest $(LFLAGS)
+ gcc $(CFLAGS) ./clientcfgtest-wrongtype.c -o $(ROOT)clientcfgtest-wrongtype $(LFLAGS)
+ gcc $(CFLAGS) ./clientcfgtest-wrongjson.c -o $(ROOT)clientcfgtest-wrongjson $(LFLAGS)
+ gcc $(CFLAGS) ./clientcfgtest-wrongvalue.c -o $(ROOT)clientcfgtest-wrongvalue $(LFLAGS)
+ gcc $(CFLAGS) ./clientcfgtest-taosd.c -o $(ROOT)clientcfgtest-taosd $(LFLAGS)
+
clean:
rm $(ROOT)asyncdemo
@@ -26,3 +32,9 @@ clean:
rm $(ROOT)stream
rm $(ROOT)subscribe
rm $(ROOT)apitest
+ rm $(ROOT)clientcfgtest
+ rm $(ROOT)clientcfgtest-wrongtype
+ rm $(ROOT)clientcfgtest-wrongjson
+ rm $(ROOT)clientcfgtest-wrongvalue
+ rm $(ROOT)clientcfgtest-taosd
+
diff --git a/tests/examples/c/schemaless.c b/tests/examples/c/schemaless.c
index 3ea199c9144950526e4bbf59b9356753e2a88da6..1a551cc5f7bd600ccaf87701953f7109743e8302 100644
--- a/tests/examples/c/schemaless.c
+++ b/tests/examples/c/schemaless.c
@@ -61,7 +61,7 @@ int main(int argc, char* argv[]) {
time_t ct = time(0);
int64_t ts = ct * 1000;
- char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";
+ char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";
char** lines = calloc(numSuperTables * numChildTables * numRowsPerChildTable, sizeof(char*));
int l = 0;
@@ -75,7 +75,7 @@ int main(int argc, char* argv[]) {
}
}
}
- shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);
+ //shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);
printf("%s\n", "begin taos_insert_lines");
int64_t begin = getTimeInUs();
@@ -83,119 +83,5 @@ int main(int argc, char* argv[]) {
int64_t end = getTimeInUs();
printf("code: %d, %s. time used: %"PRId64"\n", code, tstrerror(code), end-begin);
- char* lines_000_0[] = {
- "sta1,id=sta1_1,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us"
- };
-
- code = taos_insert_lines(taos, lines_000_0 , sizeof(lines_000_0)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_000_0 should return error\n");
- return -1;
- }
-
- char* lines_000_1[] = {
- "sta2,id=\"sta2_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
- };
-
- code = taos_insert_lines(taos, lines_000_1 , sizeof(lines_000_1)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_000_1 should return error\n");
- return -1;
- }
-
- char* lines_000_2[] = {
- "sta3,id=\"sta3_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
- };
-
- code = taos_insert_lines(taos, lines_000_2 , sizeof(lines_000_2)/sizeof(char*));
- if (0 != code) {
- printf("taos_insert_lines() lines_000_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_001_0[] = {
- "sta4,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us",
-
- };
-
- code = taos_insert_lines(taos, lines_001_0 , sizeof(lines_001_0)/sizeof(char*));
- if (0 != code) {
- printf("taos_insert_lines() lines_001_0 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_001_1[] = {
- "sta5,id=\"sta5_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
- };
-
- code = taos_insert_lines(taos, lines_001_1 , sizeof(lines_001_1)/sizeof(char*));
- if (0 != code) {
- printf("taos_insert_lines() lines_001_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_001_2[] = {
- "sta6,id=\"sta6_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
- };
-
- code = taos_insert_lines(taos, lines_001_2 , sizeof(lines_001_2)/sizeof(char*));
- if (0 != code) {
- printf("taos_insert_lines() lines_001_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_002[] = {
- "stb,id=\"stb_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000000ns",
- "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639019us",
- "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833640ms",
- "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006834s"
- };
-
- code = taos_insert_lines(taos, lines_002 , sizeof(lines_002)/sizeof(char*));
- if (0 != code) {
- printf("taos_insert_lines() lines_002 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- //Duplicate key check;
- char* lines_003_1[] = {
- "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
- };
-
- code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_003_2[] = {
- "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
- };
-
- code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_003_3[] = {
- "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
- };
-
- code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
-
- char* lines_003_4[] = {
- "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
- };
-
- code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*));
- if (0 == code) {
- printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
- return -1;
- }
return 0;
}
diff --git a/tests/examples/nodejs/test1970.js b/tests/examples/nodejs/test1970.js
new file mode 100644
index 0000000000000000000000000000000000000000..5177a7371e9a07fa7b548936ff038c1f2a29bc1f
--- /dev/null
+++ b/tests/examples/nodejs/test1970.js
@@ -0,0 +1,125 @@
+const taos = require('td2.0-connector');
+var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
+var c1 = conn.cursor(); // Initializing a new cursor
+
+let stime = new Date();
+let interval = 1000;
+
+function convertDateToTS(date) {
+ let tsArr = date.toISOString().split("T")
+ return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
+}
+
+function R(l, r) {
+ return Math.random() * (r - l) + l;
+}
+
+function randomBool() {
+ if (Math.random() < 0.5) {
+ return true;
+ }
+ return false;
+}
+
+// Initialize
+const dbname = "nodejs_1970_db";
+const tbname = "t1";
+
+let dropDB = "drop database if exists " + dbname
+console.log(dropDB);
+c1.execute(dropDB);
+
+let createDB = "create database " + dbname + " keep 36500"
+console.log(createDB);
+c1.execute(createDB);
+
+let useTbl = "use " + dbname
+console.log(useTbl)
+c1.execute(useTbl);
+
+let createTbl = "create table if not exists " + tbname + "(ts timestamp,id int)"
+console.log(createTbl);
+c1.execute(createTbl);
+
+//1969-12-31 23:59:59.999
+//1970-01-01 00:00:00.000
+//1970-01-01 07:59:59.999
+//1970-01-01 08:00:00.000
+//1628928479484 2021-08-14 08:07:59.484
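+//These five timestamps straddle the Unix epoch as seen from UTC+8; the raw
+//millisecond values in the second half of this script (-28800001, -28800000,
+//-1, 0, 1628928479484) denote the same instants.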
+let sql1 = "insert into " + dbname + "." + tbname + " values('1969-12-31 23:59:59.999',1)"
+console.log(sql1);
+c1.execute(sql1);
+
+let sql2 = "insert into " + dbname + "." + tbname + " values('1970-01-01 00:00:00.000',2)"
+console.log(sql2);
+c1.execute(sql2);
+
+let sql3 = "insert into " + dbname + "." + tbname + " values('1970-01-01 07:59:59.999',3)"
+console.log(sql3);
+c1.execute(sql3);
+
+let sql4 = "insert into " + dbname + "." + tbname + " values('1970-01-01 08:00:00.000',4)"
+console.log(sql4);
+c1.execute(sql4);
+
+let sql5 = "insert into " + dbname + "." + tbname + " values('2021-08-14 08:07:59.484',5)"
+console.log(sql5);
+c1.execute(sql5);
+
+// Select
+let query1 = "select * from " + dbname + "." + tbname
+console.log(query1);
+c1.execute(query1);
+
+var d = c1.fetchall();
+console.log(c1.fields);
+for (let i = 0; i < d.length; i++)
+ console.log(d[i][0].valueOf());
+
+//initialize
+let initSql1 = "drop table if exists " + tbname
+console.log(initSql1);
+c1.execute(initSql1);
+
+console.log(createTbl);
+c1.execute(createTbl);
+c1.execute(useTbl)
+
+//-28800001 1969-12-31 23:59:59.999
+//-28800000 1970-01-01 00:00:00.000
+//-1 1970-01-01 07:59:59.999
+//0 1970-01-01 08:00:00.000
+//1628928479484 2021-08-14 08:07:59.484
+let sql11 = "insert into " + dbname + "." + tbname + " values(-28800001,11)";
+console.log(sql11);
+c1.execute(sql11);
+
+let sql12 = "insert into " + dbname + "." + tbname + " values(-28800000,12)"
+console.log(sql12);
+c1.execute(sql12);
+
+let sql13 = "insert into " + dbname + "." + tbname + " values(-1,13)"
+console.log(sql13);
+c1.execute(sql13);
+
+let sql14 = "insert into " + dbname + "." + tbname + " values(0,14)"
+console.log(sql14);
+c1.execute(sql14);
+
+let sql15 = "insert into " + dbname + "." + tbname + " values(1628928479484,15)"
+console.log(sql15);
+c1.execute(sql15);
+
+// Select
+console.log(query1);
+c1.execute(query1);
+
+var d = c1.fetchall();
+console.log(c1.fields);
+for (let i = 0; i < d.length; i++)
+ console.log(d[i][0].valueOf());
+
+setTimeout(function () {
+ conn.close();
+}, 2000);
+
diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat
index efd8961bb0be2eb6f20e291114b92b00469b984f..2a96ee31eb6211dbc5f300fbb2f3d62c03df3061 100755
--- a/tests/gotest/batchtest.bat
+++ b/tests/gotest/batchtest.bat
@@ -1,3 +1,4 @@
+
@echo off
echo ==== start Go connector test cases test ====
cd /d %~dp0
@@ -18,3 +19,10 @@ rem case002.bat
:: cd case002
:: case002.bat
+
+
+rem cd nanosupport
+rem nanoCase.bat
+
+:: cd nanosupport
+:: nanoCase.bat
\ No newline at end of file
diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh
index 0fbbf40714b3349651beea9302e66628b31a22ac..503d77b226885b10e3874a3e0718789bed34b200 100755
--- a/tests/gotest/batchtest.sh
+++ b/tests/gotest/batchtest.sh
@@ -17,5 +17,6 @@ go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
bash ./case001/case001.sh $severIp $serverPort
-#bash ./case002/case002.sh $severIp $serverPort
+bash ./case002/case002.sh $severIp $serverPort
#bash ./case003/case003.sh $severIp $serverPort
+bash ./nanosupport/nanoCase.sh $severIp $serverPort
diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go
index 9e912aab99e2aa0da1e1490741f04e67ab1d0c8a..29bc92f2a0668b3f576145d5bd6d08ed37c82f1b 100644
--- a/tests/gotest/case001/case001.go
+++ b/tests/gotest/case001/case001.go
@@ -12,14 +12,13 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
package main
import (
"database/sql"
"flag"
"fmt"
- _ "github.com/taosdata/driver-go/taosSql"
+ _ "github.com/taosdata/driver-go/v2/taosSql"
"log"
"strconv"
"time"
@@ -63,6 +62,7 @@ func main() {
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/"
// open connect to taos server
+ fmt.Printf("url: %s\n", url)
db, err := sql.Open(taosDriverName, url)
if err != nil {
log.Fatalf("Open database error: %s\n", err)
@@ -168,17 +168,18 @@ func insert_data(db *sql.DB, demot string) {
func select_data(db *sql.DB, demot string) {
st := time.Now().Nanosecond()
-
+ fmt.Println(demot)
rows, err := db.Query("select * from ? ", demot) // go text mode
+ fmt.Println("end query",err)
checkErr(err, "select db.Query")
fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv")
var affectd int
//decoder := mahonia.NewDecoder("gbk") // 把原来ANSI格式的文本文件里的字符,用gbk进行解码。
-
+ fmt.Println("start next")
for rows.Next() {
- var ts string
+ var ts time.Time
var name string
var id int
var len int8
@@ -188,6 +189,7 @@ func select_data(db *sql.DB, demot string) {
var dv float64
err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv)
+ fmt.Println("rows:",err)
checkErr(err, "select rows.Scan")
fmt.Printf("%s|\t", ts)
diff --git a/tests/gotest/case002/case002.bat b/tests/gotest/case002/case002.bat
new file mode 100644
index 0000000000000000000000000000000000000000..385677acae826e248a410472bfc7a022ff3003ab
--- /dev/null
+++ b/tests/gotest/case002/case002.bat
@@ -0,0 +1,9 @@
+@echo off
+echo ==== start run cases002.go
+
+del go.*
+go mod init demotest
+go build
+demotest.exe -h %1 -p %2
+cd ..
+
diff --git a/tests/gotest/case002/case002.go b/tests/gotest/case002/case002.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2ba5ea28ee4f92cfbdca27c78d47268a387c693
--- /dev/null
+++ b/tests/gotest/case002/case002.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ taos "github.com/taosdata/driver-go/v2/af"
+)
+
+func Subscribe_check(topic taos.Subscriber, check int) bool {
+ count := 0
+ rows, err := topic.Consume()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(3)
+ }
+ // register Close only after Consume succeeded, so a nil result set is never closed
+ defer func() { rows.Close(); time.Sleep(time.Second) }()
+ for {
+ values := make([]driver.Value, 2)
+ err := rows.Next(values)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(4)
+ }
+ count++
+ }
+ // the batch matches only when exactly `check` rows were consumed
+ return count != check
+}
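+
+// Subscribe_check drains one Consume() batch and returns true when the row
+// count differs from check, so main can exit non-zero on the first mismatch.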
+func main() {
+ ts := 1630461600000
+ db, err := taos.Open("127.0.0.1", "", "", "", 0)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ defer db.Close()
+ db.Exec("drop database if exists test")
+ db.Exec("create database if not exists test ")
+ db.Exec("use test")
+ db.Exec("create table test (ts timestamp ,level int)")
+ for i := 0; i < 10; i++ {
+ sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i)
+ db.Exec(sqlcmd)
+ }
+
+ fmt.Println("consumption 01.")
+ topic, err := db.Subscribe(false, "test", "select ts, level from test", time.Second)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ if Subscribe_check(topic, 10) {
+ os.Exit(3)
+ }
+
+ fmt.Println("consumption 02: no new rows inserted")
+ if Subscribe_check(topic, 0) {
+ os.Exit(3)
+ }
+
+ fmt.Println("consumption 03: after one new rows inserted")
+ sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+10, 10)
+ db.Exec(sqlcmd)
+ if Subscribe_check(topic, 1) {
+ os.Exit(3)
+ }
+
+ fmt.Println("consumption 04: keep progress and continue previous subscription")
+ topic.Unsubscribe(true)
+ topic, err = db.Subscribe(false, "test", "select ts, level from test", time.Second)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ if Subscribe_check(topic, 0) {
+ os.Exit(3)
+ }
+
+}
diff --git a/tests/gotest/case002/case002.sh b/tests/gotest/case002/case002.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d98337cce7cfeb51ec9305226b20abdd7b360a46
--- /dev/null
+++ b/tests/gotest/case002/case002.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+echo "==== start run cases002.go"
+
+set +e
+#set -x
+
+script_dir="$(dirname $(readlink -f $0))"
+
+# build and run the test binary
+cd $script_dir
+rm -f go.*
+go mod init demotest > /dev/null 2>&1
+go mod tidy > /dev/null 2>&1
+go build > /dev/null 2>&1
+sleep 1s
+./demotest -h $1 -p $2
diff --git a/tests/gotest/nanosupport/connector/executor.go b/tests/gotest/nanosupport/connector/executor.go
new file mode 100644
index 0000000000000000000000000000000000000000..218ea29af3b34a8cfb5ab56585eeb07bc467d209
--- /dev/null
+++ b/tests/gotest/nanosupport/connector/executor.go
@@ -0,0 +1,208 @@
+package connector
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/taosdata/go-utils/log"
+ "github.com/taosdata/go-utils/tdengine/config"
+ "github.com/taosdata/go-utils/tdengine/connector"
+ tdengineExecutor "github.com/taosdata/go-utils/tdengine/executor"
+)
+
+type Executor struct {
+ executor *tdengineExecutor.Executor
+ ctx context.Context
+}
+
+var Logger = log.NewLogger("taos test")
+
+func NewExecutor(conf *config.TDengineGo, db string, showSql bool) (*Executor, error) {
+ tdengineConnector, err := connector.NewTDengineConnector("go", conf)
+ if err != nil {
+ return nil, err
+ }
+ executor := tdengineExecutor.NewExecutor(tdengineConnector, db, showSql, Logger)
+ return &Executor{
+ executor: executor,
+ ctx: context.Background(),
+ }, nil
+}
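+
+// Execute and Query are thin wrappers over the go-utils tdengine executor;
+// DoExec is assumed to return the affected-row count, and DoQuery the result
+// set as a connector.Data (column names in Head, row values in Data).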
+
+func (e *Executor) Execute(sql string) (int64, error) {
+ return e.executor.DoExec(e.ctx, sql)
+}
+func (e *Executor) Query(sql string) (*connector.Data, error) {
+ fmt.Println("query :", sql)
+ return e.executor.DoQuery(e.ctx, sql)
+}
+func (e *Executor) CheckData(row, col int, value interface{}, data *connector.Data) (bool, error) {
+ if data == nil {
+ return false, fmt.Errorf("data is nil")
+ }
+ if col >= len(data.Head) {
+ return false, fmt.Errorf("col out of data")
+ }
+ if row >= len(data.Data) {
+ return false, fmt.Errorf("row out of data")
+ }
+ dataValue := data.Data[row][col]
+
+ if dataValue == nil && value != nil {
+ return false, fmt.Errorf("dataValue is nil but value is not nil")
+ }
+ if dataValue == nil && value == nil {
+ return true, nil
+ }
+ if reflect.TypeOf(dataValue) != reflect.TypeOf(value) {
+ return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue))
+ }
+ switch value.(type) {
+ case time.Time:
+ t, _ := dataValue.(time.Time)
+ if value.(time.Time).Nanosecond() != t.Nanosecond() {
+ return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond())
+ }
+ case string:
+ if value.(string) != dataValue.(string) {
+ return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string))
+ }
+ case int8:
+ if value.(int8) != dataValue.(int8) {
+ return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8))
+ }
+ case int16:
+ if value.(int16) != dataValue.(int16) {
+ return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16))
+ }
+ case int32:
+ if value.(int32) != dataValue.(int32) {
+ return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32))
+ }
+ case int64:
+ if value.(int64) != dataValue.(int64) {
+ return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64))
+ }
+ case float32:
+ if value.(float32) != dataValue.(float32) {
+ return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32))
+ }
+ case float64:
+ if value.(float64) != dataValue.(float64) {
+ return false, fmt.Errorf("value not match expect %f got %f", value.(float64), dataValue.(float64))
+ }
+ case bool:
+ if value.(bool) != dataValue.(bool) {
+ return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool))
+ }
+ default:
+ return false, fmt.Errorf("unsupported type %v", reflect.TypeOf(value))
+ }
+ return true, nil
+}
+
+func (e *Executor) CheckData2(row, col int, value interface{}, data *connector.Data) {
+ match, err := e.CheckData(row, col, value, data)
+ fmt.Println("expect data is :", value)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ // CheckData has already validated the bounds, so this access is safe
+ fmt.Println("go got data is :", data.Data[row][col])
+ if !match {
+ fmt.Println(" data not match")
+ }
+}
+
+func (e *Executor) CheckRow(count int, data *connector.Data) {
+ if len(data.Data) != count {
+ fmt.Printf("check failed! expect %d rows, got %d\n", count, len(data.Data))
+ }
+}
diff --git a/tests/gotest/nanosupport/nanoCase.bat b/tests/gotest/nanosupport/nanoCase.bat
new file mode 100644
index 0000000000000000000000000000000000000000..86bddd5b02c5399d5b8d70bd08020e96a7d1c0e5
--- /dev/null
+++ b/tests/gotest/nanosupport/nanoCase.bat
@@ -0,0 +1,9 @@
+@echo off
+echo ==== start run nanosupport.go
+
+del go.*
+go mod init nano
+go mod tidy
+go build
+nano.exe -h %1 -p %2
+cd ..
diff --git a/tests/gotest/nanosupport/nanoCase.sh b/tests/gotest/nanosupport/nanoCase.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bec8929f14c0a56e7c4074efa39d1e1e881fb12e
--- /dev/null
+++ b/tests/gotest/nanosupport/nanoCase.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+echo "==== start run nanosupport.go "
+
+set +e
+#set -x
+
+script_dir="$(dirname $(readlink -f $0))"
+
+# build and run the test binary
+cd $script_dir
+rm -f go.*
+go mod init nano
+go mod tidy
+go build
+sleep 10s
+./nano -h $1 -p $2
diff --git a/tests/gotest/nanosupport/nanosupport.go b/tests/gotest/nanosupport/nanosupport.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2f24a73c0a6db3c94b90879c73d0f05e2476307
--- /dev/null
+++ b/tests/gotest/nanosupport/nanosupport.go
@@ -0,0 +1,269 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "nano/connector"
+ "time"
+
+ "github.com/taosdata/go-utils/tdengine/config"
+)
+
+func main() {
+ e, err := connector.NewExecutor(&config.TDengineGo{
+ Address: "root:taosdata@/tcp(127.0.0.1:6030)/",
+ MaxIdle: 20,
+ MaxOpen: 30,
+ MaxLifetime: 30,
+ }, "db", false)
+ if err != nil {
+ panic(err)
+ }
+ prepareData(e)
+ data, err := e.Query("select * from tb")
+ if err != nil {
+ panic(err)
+ }
+
+ layout := "2006-01-02 15:04:05.999999999"
+ t0, _ := time.Parse(layout, "2021-06-10 00:00:00.100000001")
+ t1, _ := time.Parse(layout, "2021-06-10 00:00:00.150000000")
+ t2, _ := time.Parse(layout, "2021-06-10 00:00:00.299999999")
+ t3, _ := time.Parse(layout, "2021-06-10 00:00:00.300000000")
+ t4, _ := time.Parse(layout, "2021-06-10 00:00:00.300000001")
+ t5, _ := time.Parse(layout, "2021-06-10 00:00:00.999999999")
+
+ e.CheckData2(0, 0, t0, data)
+ e.CheckData2(1, 0, t1, data)
+ e.CheckData2(2, 0, t2, data)
+ e.CheckData2(3, 0, t3, data)
+ e.CheckData2(4, 0, t4, data)
+ e.CheckData2(5, 0, t5, data)
+ e.CheckData2(3, 1, int32(3), data)
+ e.CheckData2(4, 1, int32(5), data)
+ e.CheckData2(5, 1, int32(7), data)
+
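+ // prepareData created db with precision 'ns', so the bare integer
+ // timestamps in the queries below are epoch nanoseconds; for example
+ // 1623254400000000000 is 2021-06-10 00:00:00 in UTC+8.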
+ fmt.Println(" start check nano support!")
+
+ data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000001\" and ts < \"2021-06-10 0:00:00.160000000\";")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;")
+ e.CheckData2(0, 0, int64(1), data)
+ data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000000\" and ts < \"2021-06-10 0:00:00.150000000\";")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb where ts > 1623254400400000000;")
+ e.CheckData2(0, 0, int64(1), data)
+ data, _ = e.Query("select count(*) from tb where ts < \"2021-06-10 00:00:00.400000000\";")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb where ts < now + 400000000b;")
+ e.CheckData2(0, 0, int64(6), data)
+
+ data, _ = e.Query("select count(*) from tb where ts >= \"2021-06-10 0:00:00.100000001\";")
+ e.CheckData2(0, 0, int64(6), data)
+
+ data, _ = e.Query("select count(*) from tb where ts <= 1623254400300000000;")
+ e.CheckData2(0, 0, int64(4), data)
+
+ data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.000000000\";")
+
+ data, _ = e.Query("select count(*) from tb where ts = 1623254400150000000;")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.100000001\";")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb where ts between \"2021-06-10 0:00:00.299999999\" and \"2021-06-10 0:00:00.300000001\";")
+ e.CheckData2(0, 0, int64(3), data)
+
+ data, _ = e.Query("select avg(speed) from tb interval(5000000000b);")
+ e.CheckRow(1, data)
+
+ data, _ = e.Query("select avg(speed) from tb interval(100000000b)")
+ e.CheckRow(4, data)
+
+ data, _ = e.Query("select avg(speed) from tb interval(1000b);")
+ e.CheckRow(5, data)
+
+ data, _ = e.Query("select avg(speed) from tb interval(1u);")
+ e.CheckRow(5, data)
+
+ data, _ = e.Query("select avg(speed) from tb interval(100000000b) sliding (100000000b);")
+ e.CheckRow(4, data)
+
+ data, _ = e.Query("select last(*) from tb")
+ tt, _ := time.Parse(layout, "2021-06-10 0:00:00.999999999")
+ e.CheckData2(0, 0, tt, data)
+
+ data, _ = e.Query("select first(*) from tb")
+ tt1, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001")
+ e.CheckData2(0, 0, tt1, data)
+
+ e.Execute("insert into tb values(now + 500000000b, 6);")
+ data, _ = e.Query("select * from tb;")
+ e.CheckRow(7, data)
+
+ e.Execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);")
+ e.Execute("insert into tb2 values(\"2021-06-10 0:00:00.100000001\", 1, \"2021-06-11 0:00:00.100000001\");")
+ e.Execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);")
+ e.Execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);")
+ e.Execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);")
+ e.Execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);")
+ e.Execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);")
+
+ data, _ = e.Query("select * from tb2;")
+ tt2, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001")
+ tt3, _ := time.Parse(layout, "2021-06-10 0:00:00.150000000")
+
+ e.CheckData2(0, 0, tt2, data)
+ e.CheckData2(1, 0, tt3, data)
+ e.CheckData2(2, 1, int32(4), data)
+ e.CheckData2(3, 1, int32(3), data)
+ tt4, _ := time.Parse(layout, "2021-06-11 00:00:00.300000001")
+ e.CheckData2(4, 2, tt4, data)
+ e.CheckRow(6, data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 > \"2021-06-11 0:00:00.100000000\" and ts2 < \"2021-06-11 0:00:00.100000002\";")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800500000000;")
+ e.CheckData2(0, 0, int64(1), data)
+ data, _ = e.Query("select count(*) from tb2 where ts2 < \"2021-06-11 0:00:00.400000000\";")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 < now + 400000000b;")
+ e.CheckData2(0, 0, int64(6), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 >= \"2021-06-11 0:00:00.100000001\";")
+ e.CheckData2(0, 0, int64(6), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 <= 1623340800400000000;")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.000000000\";")
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.300000001\";")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 = 1623340800300000001;")
+ e.CheckData2(0, 0, int64(1), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 between \"2021-06-11 0:00:00.299999999\" and \"2021-06-11 0:00:00.300000001\";")
+ e.CheckData2(0, 0, int64(3), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 <> 1623513600999999999;")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000001\";")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000000\";")
+ e.CheckData2(0, 0, int64(6), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 != 1623513600999999999;")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000001\";")
+ e.CheckData2(0, 0, int64(5), data)
+
+ data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000000\";")
+ e.CheckData2(0, 0, int64(6), data)
+
+ e.Execute("insert into tb2 values(now + 500000000b, 6, now +2d);")
+ data, _ = e.Query("select * from tb2;")
+ e.CheckRow(7, data)
+
+ e.Execute("create table tb3 (ts timestamp, speed int);")
+ _, err = e.Execute("insert into tb3 values(16232544001500000, 2);")
+ if err != nil {
+ fmt.Println("check pass: invalid timestamp was rejected")
+ }
+
+ e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456\", 2);")
+ data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456000\";")
+ e.CheckRow(1, data)
+
+ e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456789000\", 2);")
+ data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456789\";")
+ e.CheckRow(1, data)
+
+ // check timezone support
+
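+ // The inserts below use RFC3339 timestamps with an explicit +07:00 offset;
+ // first() is then checked against the same instant rendered one hour later,
+ // assuming the test machine runs in UTC+8.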
+ e.Execute("drop database if exists nsdb;")
+ e.Execute("create database nsdb precision 'ns';")
+ e.Execute("use nsdb;")
+ e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
+ e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);")
+ data, _ = e.Query("select first(*) from tb1;")
+
+ ttt, _ := time.Parse(layout, "2021-06-10 01:00:00.123456789")
+ e.CheckData2(0, 0, ttt, data)
+
+ e.Execute("create database usdb precision 'us';")
+ e.Execute("use usdb;")
+ e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
+ e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);")
+ data, _ = e.Query("select first(*) from tb1;")
+ ttt2, _ := time.Parse(layout, "2021-06-10 01:00:00.123456")
+ e.CheckData2(0, 0, ttt2, data)
+
+ e.Execute("drop database if exists msdb;")
+ e.Execute("create database msdb precision 'ms';")
+ e.Execute("use msdb;")
+ e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);")
+ e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);")
+ data, _ = e.Query("select first(*) from tb1;")
+ ttt3, _ := time.Parse(layout, "2021-06-10 01:00:00.123")
+ e.CheckData2(0, 0, ttt3, data)
+ fmt.Println("all tests done!")
+
+}
+
+func prepareData(e *connector.Executor) {
+ sqlList := []string{
+ "reset query cache;",
+ "drop database if exists db;",
+ "create database db;",
+ "use db;",
+ "reset query cache;",
+ "drop database if exists db;",
+ "create database db precision 'ns';",
+ "show databases;",
+ "use db;",
+ "create table tb (ts timestamp, speed int);",
+ "insert into tb values('2021-06-10 0:00:00.100000001', 1);",
+ "insert into tb values(1623254400150000000, 2);",
+ "import into tb values(1623254400300000000, 3);",
+ "import into tb values(1623254400299999999, 4);",
+ "insert into tb values(1623254400300000001, 5);",
+ "insert into tb values(1623254400999999999, 7);",
+ }
+ for _, sql := range sqlList {
+ err := executeSql(e, sql)
+ if err != nil {
+ log.Fatalf("prepare data error:%v, sql:%s", err, sql)
+ }
+ }
+}
+
+func executeSql(e *connector.Executor, sql string) error {
+ _, err := e.Execute(sql)
+ return err
+}
diff --git a/tests/http/restful/http_create_db.c b/tests/http/restful/http_create_db.c
new file mode 100644
index 0000000000000000000000000000000000000000..0bc52fa6cc86eaa1105b26472291ab2fca4f9db4
--- /dev/null
+++ b/tests/http/restful/http_create_db.c
@@ -0,0 +1,429 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef unsigned short u16_t;
+typedef unsigned int u32_t;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
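+
+// The buffer assembled above is a minimal HTTP/1.1 POST to the TDengine REST
+// endpoint; the Authorization header carries Base64("root:taosdata"), the
+// default credentials.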
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url = "/rest/sql";
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i);
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd at %d to epoll\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
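+
+ // A non-blocking connect() normally fails with EINPROGRESS; completion is
+ // reported later by epoll, and proc_pending_error() reads SO_ERROR to tell
+ // success from failure.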
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
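+ // Once more than 12 bytes have arrived, the status line
+ // ("HTTP/1.1 200 ...") is complete enough that bytes 9-11
+ // hold the three-digit status code checked below.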
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_create_tb.c b/tests/http/restful/http_create_tb.c
new file mode 100644
index 0000000000000000000000000000000000000000..91ffc54627724208c6ca5623fbe4df95829530c7
--- /dev/null
+++ b/tests/http/restful/http_create_tb.c
@@ -0,0 +1,433 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef unsigned short u16_t;
+typedef unsigned int u32_t;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+ snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);
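+ // The REST URL built above carries the target database ("/rest/sql/db<i>"),
+ // so each table lands in the database created by http_create_db.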
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
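+    /* Edge-triggered event loop: run until every connection has finished or failed */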
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
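+                        /* EPOLLET: drain the socket until EAGAIN so no readable data is missed */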
+ for ( ;; ) {
+                            nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
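+                                /* 12+ bytes cover the status line "HTTP/1.1 xxx"; the 3-digit code starts at offset 9 */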
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
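+                        /* Send until the whole request is out; partial writes advance nsent */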
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_drop_db.c b/tests/http/restful/http_drop_db.c
new file mode 100644
index 0000000000000000000000000000000000000000..f82db901dd38becafbc6eba51b8407e4b4488693
--- /dev/null
+++ b/tests/http/restful/http_drop_db.c
@@ -0,0 +1,433 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef unsigned short u16_t;
+typedef unsigned int u32_t;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
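+/* After a non-blocking connect, SO_ERROR reveals whether it actually succeeded */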
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+ snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
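+    /* Non-blocking connect: EINPROGRESS means the result is delivered later via epoll */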
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+                            nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_insert_tb.c b/tests/http/restful/http_insert_tb.c
new file mode 100644
index 0000000000000000000000000000000000000000..f9590d856cc6275d7df250fee920c1b2080f0499
--- /dev/null
+++ b/tests/http/restful/http_insert_tb.c
@@ -0,0 +1,455 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 4096
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv, offset;
+    int epfd = -1;  /* -1 so the cleanup path is safe if we fail before epoll_create */
+ uint32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ uint16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ struct timeval now;
+ int64_t start_time;
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
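+    /* Base timestamp in microseconds for the generated rows */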
+ gettimeofday(&now, NULL);
+ start_time = now.tv_sec * 1000000 + now.tv_usec;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+
+ offset = 0;
+
+ ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
+ if (ret <= 0) {
+            printf("failed to snprintf for sql(prefix), index: %d\r\n", i);
+ goto failed;
+ }
+
+ offset += ret;
+
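+        /* Pack value tuples while at least 128 bytes of headroom remain in the SQL buffer */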
+ while (offset < REQ_MAX_LINE - 128) {
+ ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
+ if (ret <= 0) {
+                printf("failed to snprintf for sql(values), index: %d\r\n", i);
+ goto failed;
+ }
+
+ offset += ret;
+ }
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+                            nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_query_tb.c b/tests/http/restful/http_query_tb.c
new file mode 100644
index 0000000000000000000000000000000000000000..e7ac0d4b01cdeb4b6d3b29fa2741667914fb2e74
--- /dev/null
+++ b/tests/http/restful/http_query_tb.c
@@ -0,0 +1,432 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 4096
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ uint32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ uint16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+
+ snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i);
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
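+        /* A small 2 ms epoll timeout keeps the loop from spinning while queries execute */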
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+                            nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_use_db.c b/tests/http/restful/http_use_db.c
new file mode 100644
index 0000000000000000000000000000000000000000..3b270224704b8cf7b9204d68f46f6d499e6f2ecd
--- /dev/null
+++ b/tests/http/restful/http_use_db.c
@@ -0,0 +1,430 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <signal.h>