diff --git a/.gitmodules b/.gitmodules
index dbb02d4ef7ed65d11418e271cac7e61b95c2a482..4b0b8dcab54c3dcd0bdbd75a4f4a2871ce3218a7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -19,3 +19,6 @@
[submodule "src/plugins/blm3"]
path = src/plugins/blm3
url = https://github.com/taosdata/blm3
+[submodule "deps/avro"]
+ path = deps/avro
+ url = https://github.com/apache/avro
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 75f98f96bcb26ae12fd32b56f2533db3001c6ae5..547455d07b6ba25ac58ae5e4851c5cd5b08e3c60 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,6 +15,26 @@ ELSE ()
CMAKE_MINIMUM_REQUIRED(VERSION 3.0)
ENDIF ()
+if(NOT WIN32)
+ string(ASCII 27 Esc)
+ set(ColourReset "${Esc}[m")
+ set(ColourBold "${Esc}[1m")
+ set(Red "${Esc}[31m")
+ set(Green "${Esc}[32m")
+ set(Yellow "${Esc}[33m")
+ set(Blue "${Esc}[34m")
+ set(Magenta "${Esc}[35m")
+ set(Cyan "${Esc}[36m")
+ set(White "${Esc}[37m")
+ set(BoldRed "${Esc}[1;31m")
+ set(BoldGreen "${Esc}[1;32m")
+ set(BoldYellow "${Esc}[1;33m")
+ set(BoldBlue "${Esc}[1;34m")
+ set(BoldMagenta "${Esc}[1;35m")
+ set(BoldCyan "${Esc}[1;36m")
+ set(BoldWhite "${Esc}[1;37m")
+endif()
+
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
diff --git a/Jenkinsfile b/Jenkinsfile
index f0f3e0d122ad470cce0ef9586e01fe9431ccfa8d..c3122ab88ec6a94036f0da0c3ac559efe27878aa 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -107,7 +107,77 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
- pip3 install ${WKC}/src/connector/python/ || echo "not install"
+ pip3 install ${WKC}/src/connector/python/
+ '''
+ return 1
+}
+def pre_test_noinstall(){
+ sh'hostname'
+ sh'''
+ cd ${WKC}
+ git reset --hard HEAD~10 >/dev/null
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WKC}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WKC}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WKC}
+ git checkout develop
+ '''
+ }
+ }
+ sh'''
+ cd ${WKC}
+ git pull >/dev/null
+ git fetch origin +refs/pull/${CHANGE_ID}/merge
+ git checkout -qf FETCH_HEAD
+ git clean -dfx
+ git submodule update --init --recursive
+ cd ${WK}
+ git reset --hard HEAD~10
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WK}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WK}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WK}
+ git checkout develop
+ '''
+ }
+ }
+ sh '''
+ cd ${WK}
+ git pull >/dev/null
+
+ export TZ=Asia/Harbin
+ date
+ git clean -dfx
+ mkdir debug
+ cd debug
+ cmake .. > /dev/null
+ make > /dev/null
'''
return 1
}
@@ -460,31 +530,55 @@ pipeline {
stage('arm64centos7') {
agent{label " arm64centos7 "}
steps {
- pre_test()
+ pre_test_noinstall()
}
}
stage('arm64centos8') {
agent{label " arm64centos8 "}
steps {
- pre_test()
+ pre_test_noinstall()
}
}
stage('arm32bionic') {
agent{label " arm32bionic "}
steps {
- pre_test()
+ pre_test_noinstall()
}
}
stage('arm64bionic') {
agent{label " arm64bionic "}
steps {
- pre_test()
+ pre_test_noinstall()
}
}
stage('arm64focal') {
agent{label " arm64focal "}
steps {
- pre_test()
+ pre_test_noinstall()
+ }
+ }
+ stage('centos7') {
+ agent{label " centos7 "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:trusty') {
+ agent{label " trusty "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:xenial') {
+ agent{label " xenial "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:bionic') {
+ agent{label " bionic "}
+ steps {
+ pre_test_noinstall()
}
}
diff --git a/cmake/define.inc b/cmake/define.inc
index bb6b285f268a6476c79fb599e76b1fd0435173b5..28b35cdb23e64a651c47ffba9cfc8ebff7a3cabc 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -128,7 +128,6 @@ IF (TD_APLHINE)
MESSAGE(STATUS "aplhine is defined")
ENDIF ()
-MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP})
IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)
@@ -140,7 +139,6 @@ IF ("${BUILD_HTTP}" STREQUAL "")
SET(BUILD_HTTP "true")
ENDIF ()
ENDIF ()
-MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP})
IF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
@@ -150,6 +148,14 @@ IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF ()
+IF ("${AVRO_SUPPORT}" MATCHES "true")
+ SET(TD_AVRO_SUPPORT TRUE)
+ENDIF ()
+
+IF (TD_AVRO_SUPPORT)
+ ADD_DEFINITIONS(-DAVRO_SUPPORT)
+ENDIF ()
+
IF (TD_LINUX)
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_LINUX)
diff --git a/cmake/input.inc b/cmake/input.inc
index 5bd1a7bed6fe9b0c7dc51c46870d8109462eae81..d18aa56ce1c684cd54286421c975ddf485129cb5 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -92,6 +92,8 @@ ENDIF ()
SET(TD_BUILD_HTTP FALSE)
+SET(TD_AVRO_SUPPORT FALSE)
+
SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index 45828245e2d541114a2ae0a287e0c6acbd0d42be..8228f7a8006c8c1aea315c875183312c84f08f2c 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
+IF (AVRO_SUPPORT)
+ MESSAGE("")
+ MESSAGE("${Green} ENABLE avro format support ${ColourReset}")
+ MESSAGE("")
+ include(ExternalProject)
+ ExternalProject_Add(
+ apache-avro
+ PREFIX "avro"
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c
+ BUILD_IN_SOURCE 1
+ PATCH_COMMAND
+ COMMAND git clean -f -d
+ COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build
+ )
+ELSE ()
+ MESSAGE("")
+ MESSAGE("${Yellow} NO avro format support ${ColourReset}")
+ MESSAGE("")
+ENDIF ()
+
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
+ MESSAGE("")
+ MESSAGE("${Green} ENABLE jemalloc ${ColourReset}")
+ MESSAGE("")
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
- include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
@@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true")
- ADD_SUBDIRECTORY(TSZ)
-ENDIF()
\ No newline at end of file
+ ADD_SUBDIRECTORY(TSZ)
+ENDIF()
diff --git a/deps/TSZ b/deps/TSZ
deleted file mode 160000
index 0ca5b15a8eac40327dd737be52c926fa5675712c..0000000000000000000000000000000000000000
--- a/deps/TSZ
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
diff --git a/deps/avro b/deps/avro
new file mode 160000
index 0000000000000000000000000000000000000000..a1fce29d9675b4dd95dfee9db32cc505d0b2227c
--- /dev/null
+++ b/deps/avro
@@ -0,0 +1 @@
+Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c
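With the Avro submodule pinned above and the `AVRO_SUPPORT` switch read by cmake/define.inc, a build with Avro enabled would plausibly look like the sketch below; the `-DAVRO_SUPPORT=true` flag is inferred from the `"${AVRO_SUPPORT}" MATCHES "true"` check in this diff, not a documented invocation.

```bash
# Fetch the pinned Avro sources, then configure with Avro support enabled.
git submodule update --init --recursive deps/avro
mkdir -p debug && cd debug
cmake .. -DAVRO_SUPPORT=true   # deps/CMakeLists.txt should print "ENABLE avro format support"
make
```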
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 799cfc14a300d3f4c9fcbf8537f04984ae8e1df4..bc3259365d0b658184318e994ffd31a9e4ffee90 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -3,7 +3,7 @@
## Grafana
-TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards.
+TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards. See [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md) to learn more about using the TDengine plugin.
### Installing Grafana
@@ -11,19 +11,24 @@ TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/
### Configuring Grafana
-The Grafana plugin for TDengine ships in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
-
-Taking CentOS 7.2 as an example, copy the grafanaplugin directory into /var/lib/grafana/plugins and restart Grafana.
+Please download the TDengine Grafana plugin from the GitHub releases page (https://github.com/taosdata/grafanaplugin/releases).
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
-Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+Taking CentOS 7.2 as an example, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
+
+```bash
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
+
+Grafana 7.3+ / 8.x performs signature checks on plugins, so the following lines must also be added to grafana.ini before the plugin can be used:
+
+```ini
[plugins]
-enable_alpha = true
-allow_loading_unsigned_plugins = taosdata-tdengine-datasource
+allow_loading_unsigned_plugins = tdengine-datasource
```
### Using Grafana
@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
* ALIAS BY: sets an alias for the current query.
* GENERATE SQL: clicking this button substitutes the corresponding variables automatically and generates the statement that is finally executed.
-
Following the default prompts, querying the average system memory usage over the specified interval on the server where TDengine is deployed looks like this:

@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
#### Importing a Dashboard
-An importable dashboard `tdengine-grafana.json` is provided under the Grafana plugin directory /usr/local/taos/connector/grafanaplugin/dashboard.
+We provide a TDengine dashboard that can serve as a monitoring and visualization tool for a TDengine cluster; see [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).
-Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
+Click the `Import` button on the left panel, select **Grafana.com Dashboard**, fill in the id `15146`, and load it:

After the import completes, you can see the following:
-
-
+
## MATLAB
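Condensed from the doc steps above, the whole plugin installation is a short shell sequence; this is a convenience sketch only, reusing the same version, paths, and grafana.ini line that the documentation itself specifies.

```bash
GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
# Grafana 7.3+/8.x checks plugin signatures, so allow the unsigned plugin.
echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource" | sudo tee -a /etc/grafana/grafana.ini
sudo systemctl restart grafana-server
```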
diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md
index 4bdcd52d62f8c3a95bc91261b77242e5263a8f23..04765602dab18fbacf7d92d44ca324db660c0ac4 100644
--- a/documentation20/cn/14.devops/01.telegraf/docs.md
+++ b/documentation20/cn/14.devops/01.telegraf/docs.md
@@ -30,12 +30,14 @@ IT operations monitoring data is usually highly time-sensitive; for example
## Data pipeline setup
-### Copy the TDengine plugin to the Grafana plugin directory
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+### Download the TDengine plugin to the Grafana plugin directory
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```
### Modify /etc/telegraf/telegraf.conf
@@ -61,7 +63,7 @@ sudo systemctl start telegraf
Use a web browser to visit IP:3000 and log in to the Grafana UI; the initial username/password is admin/admin.
Click the gear icon on the left and choose Plugins; you should find the TDengine data source plugin icon.
-Click the plus icon on the left and choose Import, then select the file /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json as prompted. If TDengine is not installed on the machine running Grafana, you can download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json and then import it. Afterwards a dashboard like the following appears:
+Click the plus icon on the left and choose Import, download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json, and import it. Afterwards a dashboard like the following appears:

diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md
index 2a031d63e55ed7888332757170b781beae787ff7..a35772bb498d426a1f44a9e7eb0bea61b51f92a5 100644
--- a/documentation20/cn/14.devops/02.collectd/docs.md
+++ b/documentation20/cn/14.devops/02.collectd/docs.md
@@ -30,11 +30,13 @@ IT operations monitoring data is usually highly time-sensitive; for example
## Data pipeline setup
### Copy the TDengine plugin to the Grafana plugin directory
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```
### Configuring collectd
@@ -62,13 +64,13 @@ add { host:'<host>', port: <port> } to the repeater section.
+
+```bash
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+```
Taking CentOS 7.2 as an example, just unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
### Use Grafana
@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard
-A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`.
+We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).
-Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
+Click the `Import` button on the left panel and load the dashboard with id `15146` from Grafana.com:

You can see as follows after Dashboard imported.
-
+
## MATLAB
diff --git a/documentation20/en/images/connections/dashboard-15146.png b/documentation20/en/images/connections/dashboard-15146.png
new file mode 100644
index 0000000000000000000000000000000000000000..3eb240ad8ad648953e32f27e674e2a9171ed9af8
Binary files /dev/null and b/documentation20/en/images/connections/dashboard-15146.png differ
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 48f0bee6b34496603d67f74938857d7bb94627f2..e42212ff0f55420dfa5f23638a69439be795e43a 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -203,6 +203,9 @@ keepColumnName 1
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
+# http keep alive interval in milliseconds, default is 30000 (30 seconds)
+# httpKeepAlive 30000
+
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
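To pick up the new option, uncomment it with a millisecond value and restart the server. A minimal sketch, assuming the stock config location /etc/taos/taos.cfg; the 3000–3600000 bounds come from the matching tglobal.c change later in this diff.

```bash
# Raise the HTTP keep-alive window to 60 s (value is in milliseconds).
echo "httpKeepAlive 60000" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```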
diff --git a/packaging/check_package.sh b/packaging/check_package.sh
index edc98da65e5574b91efbce16f4df0fd042b18c13..5904aeb6f1cf4b9c2e558bf95f9030c5aedf176b 100755
--- a/packaging/check_package.sh
+++ b/packaging/check_package.sh
@@ -128,12 +128,12 @@ function check_link() {
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
- for i in ${main_dir[@]};do
+ for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
- for i in ${nginx_main_dir[@]};do
+ for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
@@ -143,11 +143,11 @@ function check_main_path() {
function check_bin_path() {
# check install bin dir and all sub dir
bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
- for i in ${bin_dir[@]};do
+ for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
- for i in ${lbin_dir[@]};do
+ for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
@@ -171,7 +171,7 @@ function check_lib_path() {
function check_header_path() {
# check all header
header_dir=("taos.h" "taoserror.h")
- for i in ${header_dir[@]};do
+ for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 2c18cec497c0a741c96f13afb06794e26e8eaf1c..9ebaf7af98bb4336cb5748afd2b52646f5eeac3e 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -68,12 +68,6 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
-if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
- cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
-else
- echo "grafanaplugin bundled directory not found!"
- exit 1
-fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
diff --git a/packaging/release.sh b/packaging/release.sh
index 705103a87a35a73b2a91079707785279416644cd..dc7f6e67e723e71fe1cdf31880a4ebfcd5dd385d 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -151,7 +151,7 @@ function vercomp () {
}
# 1. check version information
-if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
+if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then
echo "please enter correct version"
exit 0
fi
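The root problem was that `((` is a single token to bash: it opens an arithmetic expression rather than two nested subshells, so `! is_valid_version …` was evaluated as arithmetic. The added space restores command grouping; a quick illustration:

```bash
( ( echo ok ) )   # nested subshells: runs the command, prints "ok"
(( echo ok ))     # arithmetic context: syntax error, "echo ok" is not an expression
```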
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index 4cc7daf1a4cd15d06db084faf23dd4fcb15a955d..b9cfff37d489c7058761ed8dfd4a3b9964c9f5db 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -36,7 +36,7 @@ local cur_dir
cd $1
cur_dir=$(pwd)
-for dirlist in $(ls ${cur_dir}); do
+for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then
cd ${dirlist}
cp_rpm_package ${cur_dir}/${dirlist}
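Note the quotes belong on `${cur_dir}` inside the substitution, not around the whole `$(ls …)`: quoting the entire substitution would glue every entry into one word and run the loop exactly once. Parsing ls output is fragile anyway; a glob-based sketch avoids it entirely:

```bash
cd "${cur_dir}"
for dirlist in */; do                        # one word per directory, spaces safe
    cp_rpm_package "${cur_dir}/${dirlist%/}" # strip the trailing slash from the glob match
done
```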
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 19fe23d194be2266bcb68034e3c4fd90d9824f3d..aa02955f7fe77d28b5a483a5eaa9a0960c17d278 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -73,12 +73,6 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
- cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
-else
- echo grafanaplugin bundled directory not found!
- exit 1
-fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
diff --git a/packaging/tools/check_os.sh b/packaging/tools/check_os.sh
index 92522f7b82e166c1d6ec365619869ad68969155c..cc8c6e0e9366232deb9013db62b29afebd179135 100755
--- a/packaging/tools/check_os.sh
+++ b/packaging/tools/check_os.sh
@@ -1,4 +1,4 @@
-# /bin/bash
+#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
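`# /bin/bash` was only a comment: with no `#!` magic number, execve() refuses the file and the calling shell interprets it itself, so the script's behavior depended on how it was launched. A quick check after the fix:

```bash
head -c2 packaging/tools/check_os.sh   # prints "#!" once the shebang is present
./packaging/tools/check_os.sh          # now always runs under /bin/bash
```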
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 2d3ed2e0f8f97c4604471659415a691d1b704a60..80bbad4bd9e68dd66571500cfed7ec8cd81a80cb 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -358,7 +358,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 05eb09d8f3a8b5237c36714e964530b877e332de..0e0ee7ba31f4715b2c5585dd040727d604aa90b1 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh
index 527f9a231e5a97fa086ef655cd420abc61677fcf..e5675b858066148df07508ad2438b0f00d7ce7bf 100755
--- a/packaging/tools/install_pro.sh
+++ b/packaging/tools/install_pro.sh
@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -305,7 +305,7 @@ function set_hostname() {
echo "set hostname fail!"
return
fi
-
+
#ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then
${csudo} echo $newHostname > /etc/hostname ||:
@@ -330,7 +330,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
index 52e08cb6b0d00b25686b87e2f066401e0388d4ce..ef5fb8c05a4a98a55918ee217125bd0f0a09b955 100755
--- a/packaging/tools/install_tq.sh
+++ b/packaging/tools/install_tq.sh
@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 7fbdbab1c798af572fc67cf79f27812ea64d3bae..96ba703cb37b191400ed240f0f3a184fda7eba71 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -212,7 +212,8 @@ function install_jemalloc() {
fi
if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
- /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
+ /usr/local/include/jemalloc
fi
if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then
/usr/bin/install -c -d /usr/local/lib
@@ -225,23 +226,47 @@ function install_jemalloc() {
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
- /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
+ /usr/local/lib/pkgconfig
+ fi
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
fi
fi
if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
- /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
+ /usr/local/share/doc/jemalloc
fi
if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
- /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
+ /usr/local/share/man/man3
fi
- if [ -d /etc/ld.so.conf.d ]; then
- echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
- ${csudo} ldconfig
- else
- echo "/etc/ld.so.conf.d not found!"
+ fi
+}
+
+function install_avro() {
+ if [ "$osType" != "Darwin" ]; then
+ if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
+ /usr/bin/install -c -d /usr/local/$1
+ /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
+ ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
+ ln -sf libavro.so.23 /usr/local/$1/libavro.so
+ /usr/bin/install -c -d /usr/local/$1
+ [ -f ${binary_dir}/build/$1/libavro.a ] &&
+ /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
fi
fi
}
@@ -292,6 +317,8 @@ function install_lib() {
fi
install_jemalloc
+ install_avro lib
+ install_avro lib64
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
@@ -381,11 +408,6 @@ function install_data() {
}
function install_connector() {
- if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
- ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else
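Because install_avro registers /usr/local/lib or /usr/local/lib64 in /etc/ld.so.conf.d and re-runs ldconfig, a quick post-install check can confirm the runtime is resolvable; a sketch:

```bash
# Expect a line like: libavro.so.23 (libc6,x86-64) => /usr/local/lib/libavro.so.23.0.0
ldconfig -p | grep libavro
```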
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index d26f617e421406364ce4d34c4baf5c55b904a2b5..39a35e384fffdd4f319e72fbeb819fe08f7871b8 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 89591cac234b190f55d144ccf98cb2d5c70a7936..19e24b3dafb7f1f95832e637e181449e4c381faf 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh
index 599c91fbf082955887c677b750aa12f946c0890b..4a0b033d30e6478f37a62f9cc896aee0903d39c9 100755
--- a/packaging/tools/makeclient_pro.sh
+++ b/packaging/tools/makeclient_pro.sh
@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh
index 03d9b13059daadfdc7207c78b6f89cae321f25ac..1cc7003661a7491b1df625916dd289de32434ee9 100755
--- a/packaging/tools/makeclient_tq.sh
+++ b/packaging/tools/makeclient_tq.sh
@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index f0c25208529768fb387262a668381a57e34f51ac..7071912fc8133fb2bf1b15f992ff61c514bb79a1 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index dbb7e6887fa1b0f96ea68f1c880ee77ced0858bd..0b24100c3eb6be74ee4b415759a263647a395da3 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh
index 1668838be0522bc02ab027b6ee4ac6ff250fefa2..a69e542c3c5969d609f8d5a00b6428add15fd950 100755
--- a/packaging/tools/makepkg_pro.sh
+++ b/packaging/tools/makepkg_pro.sh
@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
-# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
-# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
-# else
-# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
-# fi
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector
# else
diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh
index 416a3f60a4a57d6afa34d1d8f931a7efd68d6958..ccf42a8aab090b95de8e889b3a8186be9a6cba7a 100755
--- a/packaging/tools/makepkg_tq.sh
+++ b/packaging/tools/makepkg_tq.sh
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index 9956455691a9d042d20082eb70cd23d99c1cca77..4cfa6035e8f42efd018b0b7c462d6bf8ad874338 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -127,7 +127,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -182,7 +182,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index eeb3e14d221b49a0dbfcaeb71e600aca0d44eef4..f0427884ed0ecaa8afa4f24a1fbcec5e151c95b2 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -2501,6 +2501,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg12 = "parameter is out of range [1, 100]";
const char* msg13 = "parameter list required";
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
+ const char* msg15 = "parameter is out of range [1, 1000]";
switch (functionId) {
case TSDB_FUNC_COUNT: {
@@ -2948,11 +2949,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
} else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
+ if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
- int64_t numRowsSelected = GET_INT32_VAL(val);
+ int64_t numRowsSelected = GET_INT64_VAL(val);
if (numRowsSelected <= 0 || numRowsSelected > 1000) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
}
// todo REFACTOR
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 799bacda2ba9a3b52a99859edb5968d8602b4c33..40e67b88ebfde12eaf5230f8e05c7f7e1eb742ef 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
-extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
+extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
@@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores;
extern int8_t tsDaylight;
extern char tsTimezone[];
extern char tsLocale[];
-extern char tsCharset[]; // default encode string
+extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
-//query buffer management
-extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
-extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
-extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked
+// query buffer management
+extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
+extern int64_t
+ tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
+extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked
-extern int8_t tsKeepOriginalColumnName;
+extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsMaxSQLStringLen;
@@ -108,7 +109,7 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate;
extern int8_t tsCacheLastRow;
-//tsdb
+// tsdb
extern bool tsdbForceKeepFile;
extern bool tsdbForceCompactFile;
extern int32_t tsdbWalFlushSize;
@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum;
extern int8_t tsHttpDbNameMandatory;
+extern int32_t tsHttpKeepAlive;
// mqtt
extern int8_t tsEnableMqttModule;
@@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen;
// system info
-extern char tsOsName[];
-extern int64_t tsPageSize;
-extern int64_t tsOpenMax;
-extern int64_t tsStreamMax;
-extern int32_t tsNumOfCores;
-extern float tsTotalLogDirGB;
-extern float tsTotalTmpDirGB;
-extern float tsTotalDataDirGB;
-extern float tsAvailLogDirGB;
-extern float tsAvailTmpDirectorySpace;
-extern float tsAvailDataDirGB;
-extern float tsUsedDataDirGB;
-extern float tsMinimalLogDirGB;
-extern float tsReservedTmpDirectorySpace;
-extern float tsMinimalDataDirGB;
-extern int32_t tsTotalMemoryMB;
+extern char tsOsName[];
+extern int64_t tsPageSize;
+extern int64_t tsOpenMax;
+extern int64_t tsStreamMax;
+extern int32_t tsNumOfCores;
+extern float tsTotalLogDirGB;
+extern float tsTotalTmpDirGB;
+extern float tsTotalDataDirGB;
+extern float tsAvailLogDirGB;
+extern float tsAvailTmpDirectorySpace;
+extern float tsAvailDataDirGB;
+extern float tsUsedDataDirGB;
+extern float tsMinimalLogDirGB;
+extern float tsReservedTmpDirectorySpace;
+extern float tsMinimalDataDirGB;
+extern int32_t tsTotalMemoryMB;
extern uint32_t tsVersion;
// build info
@@ -196,37 +198,37 @@ extern char gitinfoOfInternal[];
extern char buildinfo[];
// log
-extern int8_t tsAsyncLog;
-extern int32_t tsNumOfLogLines;
-extern int32_t tsLogKeepDays;
-extern int32_t dDebugFlag;
-extern int32_t vDebugFlag;
-extern int32_t mDebugFlag;
+extern int8_t tsAsyncLog;
+extern int32_t tsNumOfLogLines;
+extern int32_t tsLogKeepDays;
+extern int32_t dDebugFlag;
+extern int32_t vDebugFlag;
+extern int32_t mDebugFlag;
extern uint32_t cDebugFlag;
-extern int32_t jniDebugFlag;
-extern int32_t tmrDebugFlag;
-extern int32_t sdbDebugFlag;
-extern int32_t httpDebugFlag;
-extern int32_t mqttDebugFlag;
-extern int32_t monDebugFlag;
-extern int32_t uDebugFlag;
-extern int32_t rpcDebugFlag;
-extern int32_t odbcDebugFlag;
+extern int32_t jniDebugFlag;
+extern int32_t tmrDebugFlag;
+extern int32_t sdbDebugFlag;
+extern int32_t httpDebugFlag;
+extern int32_t mqttDebugFlag;
+extern int32_t monDebugFlag;
+extern int32_t uDebugFlag;
+extern int32_t rpcDebugFlag;
+extern int32_t odbcDebugFlag;
extern uint32_t qDebugFlag;
-extern int32_t wDebugFlag;
-extern int32_t cqDebugFlag;
-extern int32_t debugFlag;
+extern int32_t wDebugFlag;
+extern int32_t cqDebugFlag;
+extern int32_t debugFlag;
extern int8_t tsClientMerge;
#ifdef TD_TSZ
// lossy
-extern char lossyColumns[];
-extern double fPrecision;
-extern double dPrecision;
+extern char lossyColumns[];
+extern double fPrecision;
+extern double dPrecision;
extern uint32_t maxRange;
extern uint32_t curRange;
-extern char Compressor[];
+extern char Compressor[];
#endif
// long query
extern int8_t tsDeadLockKillQuery;
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index ebfd5e18756298c18d1d2060bed30b2aee00d1b0..2995695cc198a7963f99b7b7cd7f49d9fde01da5 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -14,18 +14,18 @@
*/
#define _DEFAULT_SOURCE
+#include "tglobal.h"
+#include "monitor.h"
#include "os.h"
#include "taosdef.h"
#include "taoserror.h"
-#include "tulog.h"
+#include "tcompare.h"
#include "tconfig.h"
-#include "tglobal.h"
-#include "monitor.h"
-#include "tsocket.h"
-#include "tutil.h"
#include "tlocale.h"
+#include "tsocket.h"
#include "ttimezone.h"
-#include "tcompare.h"
+#include "tulog.h"
+#include "tutil.h"
// cluster
char tsFirst[TSDB_EP_LEN] = {0};
@@ -49,16 +49,16 @@ int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0;
// common
-int32_t tsRpcTimer = 300;
-int32_t tsRpcMaxTime = 600; // seconds;
-int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default
-int32_t tsMaxShellConns = 50000;
+int32_t tsRpcTimer = 300;
+int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default
+int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
-int32_t tsShellActivityTimer = 3; // second
+int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 4;
float tsRatioOfQueryCores = 1.0f;
-int8_t tsDaylight = 0;
+int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
@@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN;
-int8_t tsTscEnableRecordSql = 0;
+int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp
@@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000;
int32_t tsMinSlidingTime = 10;
// the maxinum number of distict query result
-int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
+int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000;
int32_t tsStreamCompStartDelay = 10000;
// the stream computing delay time after executing failed, change accordingly
-int32_t tsRetryStreamCompDelay = 10*1000;
+int32_t tsRetryStreamCompDelay = 10 * 1000;
// The delayed computing ration. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f;
@@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1;
int32_t tsRetrieveBlockingModel = 0;
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
-int8_t tsKeepOriginalColumnName = 0;
+int8_t tsKeepOriginalColumnName = 0;
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
-int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
-int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
+int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
+int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
-int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
+int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
-int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
-int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
-int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
-int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
-int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
-int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION;
-int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
-int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
-int32_t tsMaxVgroupsPerDb = 0;
+int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
+int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
+int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
+int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
+int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
+int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION;
+int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
+int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
+int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
-// tsdb config
+// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
-bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly
+bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly
int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB
// balance
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
-int32_t tsBalanceInterval = 300; // seconds
+int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
@@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0;
int8_t tsHttpDbNameMandatory = 0;
+int32_t tsHttpKeepAlive = 30000;
// mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
-char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
-char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
-char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
-char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
-char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
-char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
+char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
+char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
+char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
+char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
+char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
+char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor
int8_t tsEnableMonitorModule = 1;
@@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds
// stream
-int8_t tsEnableStream = 1;
+int8_t tsEnableStream = 1;
// internal
int8_t tsCompactMnodeWal = 0;
@@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0};
char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/";
-int32_t tsDiskCfgNum = 0;
+int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
@@ -231,42 +232,42 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS];
int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L};
// system info
-char tsOsName[10] = "Linux";
-int64_t tsPageSize;
-int64_t tsOpenMax;
-int64_t tsStreamMax;
-int32_t tsNumOfCores = 1;
-float tsTotalTmpDirGB = 0;
-float tsTotalDataDirGB = 0;
-float tsAvailTmpDirectorySpace = 0;
-float tsAvailDataDirGB = 0;
-float tsUsedDataDirGB = 0;
-float tsReservedTmpDirectorySpace = 1.0f;
-float tsMinimalDataDirGB = 2.0f;
-int32_t tsTotalMemoryMB = 0;
+char tsOsName[10] = "Linux";
+int64_t tsPageSize;
+int64_t tsOpenMax;
+int64_t tsStreamMax;
+int32_t tsNumOfCores = 1;
+float tsTotalTmpDirGB = 0;
+float tsTotalDataDirGB = 0;
+float tsAvailTmpDirectorySpace = 0;
+float tsAvailDataDirGB = 0;
+float tsUsedDataDirGB = 0;
+float tsReservedTmpDirectorySpace = 1.0f;
+float tsMinimalDataDirGB = 2.0f;
+int32_t tsTotalMemoryMB = 0;
uint32_t tsVersion = 0;
// log
-int32_t tsNumOfLogLines = 10000000;
-int32_t mDebugFlag = 131;
-int32_t sdbDebugFlag = 131;
-int32_t dDebugFlag = 135;
-int32_t vDebugFlag = 135;
+int32_t tsNumOfLogLines = 10000000;
+int32_t mDebugFlag = 131;
+int32_t sdbDebugFlag = 131;
+int32_t dDebugFlag = 135;
+int32_t vDebugFlag = 135;
uint32_t cDebugFlag = 131;
-int32_t jniDebugFlag = 131;
-int32_t odbcDebugFlag = 131;
-int32_t httpDebugFlag = 131;
-int32_t mqttDebugFlag = 131;
-int32_t monDebugFlag = 131;
+int32_t jniDebugFlag = 131;
+int32_t odbcDebugFlag = 131;
+int32_t httpDebugFlag = 131;
+int32_t mqttDebugFlag = 131;
+int32_t monDebugFlag = 131;
uint32_t qDebugFlag = 131;
-int32_t rpcDebugFlag = 131;
-int32_t uDebugFlag = 131;
-int32_t debugFlag = 0;
-int32_t sDebugFlag = 135;
-int32_t wDebugFlag = 135;
-int32_t tsdbDebugFlag = 131;
-int32_t cqDebugFlag = 131;
-int32_t fsDebugFlag = 135;
+int32_t rpcDebugFlag = 131;
+int32_t uDebugFlag = 131;
+int32_t debugFlag = 0;
+int32_t sDebugFlag = 135;
+int32_t wDebugFlag = 135;
+int32_t tsdbDebugFlag = 131;
+int32_t cqDebugFlag = 131;
+int32_t fsDebugFlag = 135;
int8_t tsClientMerge = 0;
@@ -274,13 +275,14 @@ int8_t tsClientMerge = 0;
//
// lossy compress 6
//
-char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress.
-// below option can take effect when tsLossyColumns not empty
-double fPrecision = 1E-8; // float column precision
-double dPrecision = 1E-16; // double column precision
-uint32_t maxRange = 500; // max range
-uint32_t curRange = 100; // range
-char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
+char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
+ // can close lossy compress.
+// below option can take effect when tsLossyColumns not empty
+double fPrecision = 1E-8; // float column precision
+double dPrecision = 1E-16; // double column precision
+uint32_t maxRange = 500; // max range
+uint32_t curRange = 100; // range
+char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif
// long query death-lock
@@ -298,7 +300,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"};
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
void taosSetAllDebugFlag() {
- if (debugFlag != 0) {
+ if (debugFlag != 0) {
mDebugFlag = debugFlag;
sdbDebugFlag = debugFlag;
dDebugFlag = debugFlag;
@@ -309,7 +311,7 @@ void taosSetAllDebugFlag() {
httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag;
monDebugFlag = debugFlag;
- qDebugFlag = debugFlag;
+ qDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
sDebugFlag = debugFlag;
@@ -321,12 +323,13 @@ void taosSetAllDebugFlag() {
}
bool taosCfgDynamicOptions(char *msg) {
- char *option, *value;
- int32_t olen, vlen;
- int32_t vint = 0;
+ char *option, *value;
+ int32_t olen, vlen;
+ int32_t vint = 0;
paGetToken(msg, &option, &olen);
- if (olen == 0) return false;;
+ if (olen == 0) return false;
+ ;
paGetToken(option + olen + 1, &value, &vlen);
if (vlen == 0)
@@ -339,9 +342,9 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i;
- //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
+ // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;
-
+
int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue;
@@ -370,7 +373,7 @@ bool taosCfgDynamicOptions(char *msg) {
return true;
}
if (strncasecmp(cfg->option, "debugFlag", olen) == 0) {
- taosSetAllDebugFlag();
+ taosSetAllDebugFlag();
}
return true;
}
@@ -427,7 +430,7 @@ static void taosCheckDataDirCfg() {
}
static int32_t taosCheckTmpDir(void) {
- if (strlen(tsTempDir) <= 0){
+ if (strlen(tsTempDir) <= 0) {
uError("tempDir is not set");
return -1;
}
@@ -448,7 +451,7 @@ static void doInitGlobalConfig(void) {
srand(taosSafeRand());
SGlobalCfg cfg = {0};
-
+
// ip address
cfg.option = "firstEp";
cfg.ptr = tsFirst;
@@ -577,12 +580,12 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
- cfg.minValue = 10*10000;
- cfg.maxValue = 10000*10000;
+ cfg.minValue = 10 * 10000;
+ cfg.maxValue = 10000 * 10000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
-
+
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1189,7 +1192,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- // module configs
+ // module configs
cfg.option = "flowctrl";
cfg.ptr = &tsEnableFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1320,6 +1323,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ // pContext in cache
+ cfg.option = "httpKeepAlive";
+ cfg.ptr = &tsHttpKeepAlive;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 3000;
+ cfg.maxValue = 3600000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// debug flag
cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines;
@@ -1401,7 +1415,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
-
cfg.option = "sdbDebugFlag";
cfg.ptr = &sdbDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1633,7 +1646,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- // enable kill long query
+ // enable kill long query
cfg.option = "deadLockKillQuery";
cfg.ptr = &tsDeadLockKillQuery;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1731,21 +1744,18 @@ static void doInitGlobalConfig(void) {
#else
assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#endif
-
}
-void taosInitGlobalCfg() {
- pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
-}
+void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); }
int32_t taosCheckGlobalCfg() {
- char fqdn[TSDB_FQDN_LEN];
+ char fqdn[TSDB_FQDN_LEN];
uint16_t port;
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}
-
+
if (tsLocalFqdn[0] == 0) {
taosGetFqdn(tsLocalFqdn);
}
@@ -1772,7 +1782,7 @@ int32_t taosCheckGlobalCfg() {
if (taosCheckTmpDir()) {
return -1;
}
-
+
taosGetSystemInfo();
tsSetLocale();
@@ -1794,8 +1804,8 @@ int32_t taosCheckGlobalCfg() {
}
if (tsMaxTablePerVnode < tsMinTablePerVnode) {
- uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)",
- tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode);
+ uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode,
+ tsMinTablePerVnode, tsMinTablePerVnode);
tsMaxTablePerVnode = tsMinTablePerVnode;
}
@@ -1817,7 +1827,7 @@ int32_t taosCheckGlobalCfg() {
}
tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035]
- tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
+ tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
tsSyncPort = tsServerPort + TSDB_PORT_SYNC;
tsHttpPort = tsServerPort + TSDB_PORT_HTTP;
@@ -1837,17 +1847,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
strcpy(fqdn, ep);
char *temp = strchr(fqdn, ':');
- if (temp) {
+ if (temp) {
*temp = 0;
- *port = atoi(temp+1);
- }
-
+ *port = atoi(temp + 1);
+ }
+
if (*port == 0) {
*port = tsServerPort;
return -1;
}
- return 0;
+ return 0;
}
/*
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index 22fb0c4ae4987ade0a406fe5628bf80d975f3ae5..42ebedf4027b0e333b9e79b8045f1bae0d338ac7 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -36,15 +36,15 @@ import java.util.regex.Pattern;
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
+ // for jdbc preparedStatement interface
private String rawSql;
private Object[] parameters;
-
- private ArrayList colData;
+ // for parameter binding
+ private long nativeStmtHandle = 0;
+ private String tableName;
private ArrayList tableTags;
private int tagValueLength;
-
- private String tableName;
- private long nativeStmtHandle = 0;
+ private ArrayList colData;
TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection);
@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
preprocessSql();
}
- /*
- *
- */
-
/**
* Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
- if (isClosed()) {
+ if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- }
-
- if (parameterIndex < 1 && parameterIndex >= parameters.length) {
+ if (parameterIndex < 1 && parameterIndex >= parameters.length)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
- }
parameters[parameterIndex - 1] = x;
}
@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- // TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- //TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
-
}
@Override
@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
///////////////////////////////////////////////////////////////////////
// NOTE: the following APIs are not JDBC compatible
- // set the bind table name
+ // parameter binding
private static class ColumnInfo {
@SuppressWarnings("rawtypes")
private ArrayList data;
@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
- public void setTableName(String name) {
+ public void setTableName(String name) throws SQLException {
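+        // a previous table is still bound: execute and clear its pending rows before switching tables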
+ if (this.tableName != null) {
+ this.columnDataExecuteBatch();
+ this.columnDataClearBatchInternal();
+ }
this.tableName = name;
}
@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void columnDataExecuteBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.executeBatch(this.nativeStmtHandle);
- this.columnDataClearBatch();
+ this.columnDataClearBatchInternal();
}
+ @Deprecated
public void columnDataClearBatch() {
+ columnDataClearBatchInternal();
+ }
+
+ private void columnDataClearBatchInternal() {
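+        // null out every column slot while keeping the list size, then forget the bound table name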
int size = this.colData.size();
this.colData.clear();
-
this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name
}
+
public void columnDataCloseBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.closeBatch(this.nativeStmtHandle);
@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
this.nativeStmtHandle = 0L;
this.tableName = null;
}
+
+ @Override
+ public void close() throws SQLException {
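+        // clear pending bound data and release the native statement handle before closing at the JDBC level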
+ this.columnDataClearBatchInternal();
+ this.columnDataCloseBatch();
+ super.close();
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
index 99e46bc64f44f6326aec12734849cc5ef518c903..6cfc01cc9d28648d09023ff10cc34bbe7ff29499 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
@@ -5,9 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
-import org.apache.http.NoHttpResponseException;
import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
@@ -21,10 +19,7 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
-import javax.net.ssl.SSLException;
import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
@@ -53,10 +48,9 @@ public class HttpClientPoolUtil {
return DEFAULT_HTTP_KEEP_TIME * 1000;
};
- private static CloseableHttpClient httpClient;
+ private static final CloseableHttpClient httpClient;
static {
-
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..46f201d1c0a525f52014d133e25fc0db4741050c
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java
@@ -0,0 +1,139 @@
+package com.taosdata.jdbc;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+
+public class ParameterBindTest {
+
+ private static final String host = "127.0.0.1";
+ private static final String stable = "weather";
+
+ private Connection conn;
+ private final Random random = new Random(System.currentTimeMillis());
+
+ @Test
+ public void test() {
+ // given
+ String[] tbnames = {"t1", "t2", "t3"};
+ int rows = 10;
+
+ // when
+ insertIntoTables(tbnames, 10);
+
+ // then
+ assertRows(stable, tbnames.length * rows);
+ for (String t : tbnames) {
+ assertRows(t, rows);
+ }
+ }
+
+ @Test
+ public void testMultiThreads() {
+ // given
+ String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}};
+ int rows = 10;
+
+ // when
+ List<Thread> threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList());
+ threads.forEach(Thread::start);
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ // then
+ for (String[] table : tables) {
+ for (String t : table) {
+ assertRows(t, rows);
+ }
+ }
+
+ }
+
+ private void assertRows(String tbname, int rows) {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + tbname);
+ while (rs.next()) {
+ int count = rs.getInt(1);
+ Assert.assertEquals(rows, count);
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void insertIntoTables(String[] tbnames, int rowsEachTable) {
+ long current = System.currentTimeMillis();
+ String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)";
+ try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+ for (int i = 0; i < tbnames.length; i++) {
+ pstmt.setTableName(tbnames[i]);
+ pstmt.setTagInt(0, random.nextInt(100));
+ pstmt.setTagInt(1, random.nextInt(100));
+
+ ArrayList<Long> timestampList = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ timestampList.add(current + i * 1000 + j);
+ }
+ pstmt.setTimestamp(0, timestampList);
+
+ ArrayList<Integer> f1List = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ f1List.add(random.nextInt(100));
+ }
+ pstmt.setInt(1, f1List);
+
+ ArrayList<Integer> f2List = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ f2List.add(random.nextInt(100));
+ }
+ pstmt.setInt(2, f2List);
+
+ pstmt.columnDataAddBatch();
+ }
+
+ pstmt.columnDataExecuteBatch();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Before
+ public void before() {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ try {
+ conn = DriverManager.getConnection(url);
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test_pd");
+ stmt.execute("create database if not exists test_pd");
+ stmt.execute("use test_pd");
+ stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try {
+// Statement stmt = conn.createStatement();
+// stmt.execute("drop database if exists test_pd");
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java
index cae33f18e7a04e443092d8e696bb32be9600a435..c540fa77aa75b9becb5735c1765fe35d1948a27d 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java
@@ -2,7 +2,6 @@ package com.taosdata.jdbc.utils;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
-import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import org.junit.Test;
@@ -11,7 +10,6 @@ import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.List;
-import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@@ -27,11 +25,6 @@ public class HttpClientPoolUtilTest {
// given
List<Thread> threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> {
useDB();
-// try {
-// TimeUnit.SECONDS.sleep(10);
-// } catch (InterruptedException e) {
-// e.printStackTrace();
-// }
})).collect(Collectors.toList());
threads.forEach(Thread::start);
@@ -43,7 +36,6 @@ public class HttpClientPoolUtilTest {
e.printStackTrace();
}
}
-
}
private void useDB() {
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 5b48374e8f7d54bef4d199ff9398aaf6a74b257e..18b6122d52eae98a377bceb81a9d43af21877bd4 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -3,6 +3,7 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(. SRC)
@@ -61,12 +62,22 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
+LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
+
IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+ IF (AVRO_SUPPORT)
+ TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdump taos_static)
+ ENDIF()
ELSE ()
- TARGET_LINK_LIBRARIES(taosdump taos cJson)
+ IF (AVRO_SUPPORT)
+ TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdump taos)
+ ENDIF ()
ENDIF ()
ENDIF ()
@@ -74,8 +85,8 @@ IF (TD_DARWIN)
# missing for macosx
# ADD_EXECUTABLE(taosdump ${SRC})
# IF (TD_SOMODE_STATIC)
- # TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+ # TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
# ELSE ()
- # TARGET_LINK_LIBRARIES(taosdump taos cJson)
+ # TARGET_LINK_LIBRARIES(taosdump taos jansson)
# ENDIF ()
ENDIF ()
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 69ec2968218a9e5b2ca34551c60b6c44256298d2..66f5dd3c9a2e5a9dbb0049b6c3d1ed397d98f6b0 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -28,15 +28,24 @@
#include "tsdb.h"
#include "tutil.h"
-#define AVRO_SUPPORT 0
-#if AVRO_SUPPORT == 1
+static char **g_tsDumpInSqlFiles = NULL;
+static char g_tsCharset[63] = {0};
+
+#ifdef AVRO_SUPPORT
#include <avro.h>
-#endif
+#include <jansson.h>
+
+static char **g_tsDumpInAvroFiles = NULL;
+
+static void print_json_aux(json_t *element, int indent);
+
+#endif /* AVRO_SUPPORT */
#define TSDB_SUPPORT_NANOSECOND 1
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
+#define MAX_PATH_LEN 4096 // max path length on linux is 4095
#define COMMAND_SIZE 65536
#define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql"
@@ -46,8 +55,6 @@
static int converStringToReadable(char *str, int size, char *buf, int bufsize);
static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-static void dumpCharset(FILE *fp);
-static void loadFileCharset(FILE *fp, char *fcharset);
typedef struct {
short bytes;
@@ -64,7 +71,12 @@ typedef struct {
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
- fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
+ fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
+
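+// warnPrint mirrors errorPrint below but wraps the message in yellow (33) instead of red (31)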
+#define warnPrint(fmt, ...) \
+ do { fprintf(stderr, "\033[33m"); \
+ fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "\033[31m"); \
@@ -208,14 +220,13 @@ typedef struct {
typedef struct {
pthread_t threadID;
int32_t threadIndex;
- int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
int precision;
TAOS *taos;
int64_t rowsOfDumpOut;
- int64_t tablesOfDumpOut;
- int64_t tableFrom;
+ int64_t count;
+ int64_t from;
} threadInfo;
typedef struct {
@@ -225,6 +236,44 @@ typedef struct {
int32_t totalDatabasesOfDumpOut;
} resultStatistics;
+#ifdef AVRO_SUPPORT
+
+enum enAvro_Codec {
+ AVRO_CODEC_START = 0,
+ AVRO_CODEC_NULL = AVRO_CODEC_START,
+ AVRO_CODEC_DEFLATE,
+ AVRO_CODEC_SNAPPY,
+ AVRO_CODEC_LZMA,
+ AVRO_CODEC_UNKNOWN = 255
+};
+
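+// codec names indexed by enAvro_Codec; the order must stay in sync with the enum above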
+char *g_avro_codec[] = {
+ "null",
+ "deflate",
+ "snappy",
+ "lzma",
+ "unknown"
+};
+
+/* avro section begin */
+#define RECORD_NAME_LEN 64
+#define FIELD_NAME_LEN 64
+#define TYPE_NAME_LEN 16
+
+typedef struct FieldStruct_S {
+ char name[FIELD_NAME_LEN];
+ char type[TYPE_NAME_LEN];
+} FieldStruct;
+
+typedef struct RecordSchema_S {
+ char name[RECORD_NAME_LEN];
+ char *fields;
+ int num_fields;
+} RecordSchema;
+
+/* avro section end */
+#endif
+
static int64_t g_totalDumpOutRows = 0;
SDbInfo **g_dbInfos = NULL;
@@ -276,14 +325,17 @@ static struct argp_option options[] = {
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
- {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
- {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
- {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
- {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
- {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
- {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
- {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
- {"debug", 'g', 0, 0, "Print debug info.", 8},
+#ifdef AVRO_SUPPORT
+ {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
+ {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
+#endif
+ {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
+ {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
+ {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
+ {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
+ {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10},
+ {"debug", 'g', 0, 0, "Print debug info.", 15},
{0}
};
@@ -310,7 +362,10 @@ typedef struct arguments {
// dump format option
bool schemaonly;
bool with_property;
+#ifdef AVRO_SUPPORT
bool avro;
+ int avro_codec;
+#endif
int64_t start_time;
char humanStartTime[HUMAN_TIME_LEN];
int64_t end_time;
@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-static int dumpOut();
-static int dumpIn();
-static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
- FILE *fp);
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
- FILE *fp, char* dbName);
-static int getTableDes(
- char* dbName, char *table,
- TableDef *stableDes, bool isSuperTable);
-static int64_t dumpTableData(FILE *fp, char *tbName,
- char* dbName,
- int precision,
- char *jsonAvroSchema);
-static int checkParam();
-static void freeDbInfos();
-
struct arguments g_args = {
// connection option
NULL,
@@ -381,7 +420,10 @@ struct arguments g_args = {
// dump format option
false, // schemaonly
true, // with_property
- false, // avro format
+#ifdef AVRO_SUPPORT
+ false, // avro
+ AVRO_CODEC_SNAPPY, // avro_codec
+#endif
-INT64_MAX + 1, // start_time
{0}, // humanStartTime
INT64_MAX, // end_time
@@ -392,7 +434,7 @@ struct arguments g_args = {
1, // table_batch
false, // allow_sys
// other options
- 5, // thread_num
+ 8, // thread_num
0, // abort
NULL, // arg_list
0, // arg_list_len
@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
}
break;
+#ifdef AVRO_SUPPORT
+ case 'v':
+ g_args.avro = true;
+ break;
+
+ case 'd':
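+        // translate the codec name into its enum index; an unrecognized name leaves the default (snappy)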
+ for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
+ if (0 == strcmp(arg, g_avro_codec[i])) {
+ g_args.avro_codec = i;
+ break;
+ }
+ }
+ break;
+#endif
+
case 'r':
g_args.resultFile = arg;
break;
@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'N':
g_args.with_property = false;
break;
- case 'v':
- g_args.avro = true;
- break;
case 'S':
// parse time here.
break;
@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.abort = 1;
break;
case ARGP_KEY_ARG:
- g_args.arg_list = &state->argv[state->next - 1];
- g_args.arg_list_len = state->argc - state->next + 1;
+ if (strlen(state->argv[state->next - 1])) {
+ g_args.arg_list = &state->argv[state->next - 1];
+ g_args.arg_list_len = state->argc - state->next + 1;
+ }
state->next = state->argc;
break;
@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause(
for (; counter < numOfCols; counter++) {
if (counter != count_temp) {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, ", \'%s\'",
@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause(
pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
}
} else {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value);
@@ -1050,1902 +1106,2717 @@ static void dumpCreateMTableClause(
free(tmpBuf);
}
-static int convertTbDesToAvroSchema(
- char *dbName, char *tbName, TableDef *tableDes, int colCount,
- char **avroSchema)
+static int64_t getNtbCountOfStb(char *dbName, char *stbName)
{
- errorPrint("%s() LN%d TODO: covert table schema to avro schema\n",
- __func__, __LINE__);
- // {
- // "namesapce": "database name",
- // "type": "record",
- // "name": "table name",
- // "fields": [
- // {
- // "name": "col0 name",
- // "type": "long"
- // },
- // {
- // "name": "col1 name",
- // "type": ["int", "null"]
- // },
- // {
- // "name": "col2 name",
- // "type": ["float", "null"]
- // },
- // ...
- // {
- // "name": "coln name",
- // "type": ["string", "null"]
- // }
- // ]
- // }
- *avroSchema = (char *)calloc(1,
- 17 + TSDB_DB_NAME_LEN /* dbname section */
- + 17 /* type: record */
- + 11 + TSDB_TABLE_NAME_LEN /* tbname section */
- + 10 /* fields section */
- + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
- if (*avroSchema == NULL) {
- errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
+ TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ dbName, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
return -1;
}
- char *pstr = *avroSchema;
- pstr += sprintf(pstr,
- "{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [",
- dbName, tbName);
- for (int i = 0; i < colCount; i ++) {
- if (0 == i) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "long");
- } else {
- if (strcasecmp(tableDes->cols[i].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
- tableDes->cols[i].field, "string");
- } else {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
- tableDes->cols[i].field, tableDes->cols[i].type);
- }
- }
- if ((i != (colCount -1))
- && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
- pstr += sprintf(pstr, "},");
- } else {
- pstr += sprintf(pstr, "}");
- break;
- }
+ int64_t count = 0;
+
+ char command[COMMAND_SIZE];
+
+ sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
+
+ TAOS_RES *res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
}
- pstr += sprintf(pstr, "]}");
+ TAOS_ROW row = NULL;
- debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema);
+ if ((row = taos_fetch_row(res)) != NULL) {
+ count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
+ }
- return 0;
+ taos_close(taos);
+ return count;
}
-static int64_t dumpNormalTable(
- char *dbName,
- char *stable,
- char *tbName,
- int precision,
- FILE *fp
- ) {
+static int getTableDes(
+ TAOS *taos,
+ char* dbName, char *table,
+ TableDef *tableDes, bool isSuperTable) {
+ TAOS_ROW row = NULL;
+ TAOS_RES* res = NULL;
int colCount = 0;
- TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
- + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
- colCount = getTableDes(dbName, tbName, tableDes, false);
-
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
+ char sqlstr[COMMAND_SIZE];
+ sprintf(sqlstr, "describe %s.%s;", dbName, table);
- // create child-table using super-table
- dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
- } else { // dump table definition
- colCount = getTableDes(dbName, tbName, tableDes, false);
+ res = taos_query(taos, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- // create normal-table or super-table
- dumpCreateTableClause(tableDes, colCount, fp, dbName);
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
+ while ((row = taos_fetch_row(res)) != NULL) {
+ tstrncpy(tableDes->cols[colCount].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ min(TSDB_COL_NAME_LEN + 1,
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
+ tstrncpy(tableDes->cols[colCount].type,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
+ tableDes->cols[colCount].length =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(tableDes->cols[colCount].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(COL_NOTE_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
+ colCount++;
}
- char *jsonAvroSchema = NULL;
- if (g_args.avro) {
- if (0 != convertTbDesToAvroSchema(
- dbName, tbName, tableDes, colCount, &jsonAvroSchema)) {
- errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n",
- __func__,
- __LINE__);
- freeTbDes(tableDes);
- return -1;
- }
- }
+ taos_free_result(res);
+ res = NULL;
- int64_t ret = 0;
- if (!g_args.schemaonly) {
- ret = dumpTableData(fp, tbName, dbName, precision,
- jsonAvroSchema);
+ if (isSuperTable) {
+ return colCount;
}
- tfree(jsonAvroSchema);
- freeTbDes(tableDes);
- return ret;
-}
+ // if the child table has tags, run "select tagName from table" to get each tag value
+ for (int i = 0 ; i < colCount; i++) {
+ if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
-static int64_t dumpNormalTableBelongStb(
- SDbInfo *dbInfo, char *stbName, char *ntbName)
-{
- int64_t count = 0;
+ sprintf(sqlstr, "select %s from %s.%s",
+ tableDes->cols[i].field, dbName, table);
- char tmpBuf[4096] = {0};
- FILE *fp = NULL;
+ res = taos_query(taos, sqlstr);
+ code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
+ }
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
+ fields = taos_fetch_fields(res);
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
- }
+ row = taos_fetch_row(res);
+ if (NULL == row) {
+ errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
+ }
- count = dumpNormalTable(
- dbInfo->name,
- stbName,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
+ if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
+ sprintf(tableDes->cols[i].note, "%s", "NUL");
+ sprintf(tableDes->cols[i].value, "%s", "NULL");
+ taos_free_result(res);
+ res = NULL;
+ continue;
+ }
- fclose(fp);
- return count;
-}
+ int32_t* length = taos_fetch_lengths(res);
-static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName)
-{
- int64_t count = 0;
-
- char tmpBuf[4096] = {0};
- FILE *fp = NULL;
+ //int32_t* length = taos_fetch_lengths(tmpResult);
+ switch (fields[0].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ sprintf(tableDes->cols[i].value, "%d",
+ ((((int32_t)(*((char *)
+ row[TSDB_SHOW_TABLES_NAME_INDEX])))==1)
+ ?1:0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ sprintf(tableDes->cols[i].value, "%" PRId64 "",
+ *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ sprintf(tableDes->cols[i].value, "%f",
+ GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ sprintf(tableDes->cols[i].value, "%f",
+ GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ memset(tableDes->cols[i].value, 0,
+ sizeof(tableDes->cols[i].value));
+ int len = strlen((char *)row[0]);
+ // FIXME for long value
+ if (len < (COL_VALUEBUF_LEN - 2)) {
+ converStringToReadable(
+ (char *)row[0],
+ length[0],
+ tableDes->cols[i].value,
+ len);
+ } else {
+ tableDes->cols[i].var_value = calloc(1, len * 2);
+ if (tableDes->cols[i].var_value == NULL) {
+ errorPrint("%s() LN%d, memory alalocation failed!\n",
+ __func__, __LINE__);
+ taos_free_result(res);
+ return -1;
+ }
+ converStringToReadable((char *)row[0],
+ length[0],
+ (char *)(tableDes->cols[i].var_value), len);
+ }
+ break;
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
+ char tbuf[COMMAND_SIZE-2]; // need reserve 2 bytes for ' '
+ convertNCharToReadable(
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ length[0], tbuf, COMMAND_SIZE-2);
+ sprintf(tableDes->cols[i].value, "%s", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+#if 0
+ if (!g_args.mysqlFlag) {
+ sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
+ }
+#endif
+ break;
+ default:
+ break;
+ }
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
+ taos_free_result(res);
}
- count = dumpNormalTable(
- dbInfo->name,
- NULL,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
-
- fclose(fp);
- return count;
+ return colCount;
}
-static void *dumpNtbOfDb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
- debugPrint("dump table count = \t%"PRId64"\n",
- pThreadInfo->tablesOfDumpOut);
+static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName) {
+ int counter = 0;
+ int count_temp = 0;
+ char sqlstr[COMMAND_SIZE];
- FILE *fp = NULL;
- char tmpBuf[4096] = {0};
+ char* pstr = sqlstr;
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%d.sql",
- g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%d.sql",
- pThreadInfo->dbName, pThreadInfo->threadIndex);
- }
+ pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
+ dbName, tableDes->name);
- fp = fopen(tmpBuf, "w");
+ for (; counter < numOfCols; counter++) {
+ if (tableDes->cols[counter].note[0] != '\0') break;
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
- }
+ if (counter == 0) {
+ pstr += sprintf(pstr, " (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ } else {
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ }
- int64_t count;
- for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) {
- debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
- pThreadInfo->threadIndex, i,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name);
- count = dumpNormalTable(
- pThreadInfo->dbName,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name,
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
+ pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
- fclose(fp);
- return NULL;
-}
-
-static void *dumpNormalTablesOfStb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
- debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut);
-
- char command[COMMAND_SIZE];
+ count_temp = counter;
- sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
- pThreadInfo->dbName, pThreadInfo->stbName,
- pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom);
+ for (; counter < numOfCols; counter++) {
+ if (counter == count_temp) {
+ pstr += sprintf(pstr, ") TAGS (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ } else {
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ }
- TAOS_RES *res = taos_query(pThreadInfo->taos, command);
- int32_t code = taos_errno(res);
- if (code) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return NULL;
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
+ pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
+ }
}
- FILE *fp = NULL;
- char tmpBuf[4096] = {0};
+ pstr += sprintf(pstr, ");");
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.%d.sql",
- g_args.outpath,
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%s.%d.sql",
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- }
+ debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
+ return fprintf(fp, "%s\n\n", sqlstr);
+}
- fp = fopen(tmpBuf, "w");
+static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp)
+{
+ uint64_t sizeOfTableDes =
+ (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
+ TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
+ if (NULL == tableDes) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, sizeOfTableDes);
+ exit(-1);
}
- TAOS_ROW row = NULL;
- int64_t i = 0;
- int64_t count;
- while((row = taos_fetch_row(res)) != NULL) {
- debugPrint("[%d] sub table %"PRId64": name: %s\n",
- pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ int colCount = getTableDes(taos, dbInfo->name,
+ stbName, tableDes, true);
- count = dumpNormalTable(
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
- }
+ if (colCount < 0) {
+ free(tableDes);
+ errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
+ __func__, __LINE__, stbName);
+ exit(-1);
}
- fclose(fp);
- return NULL;
+ dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
+ free(tableDes);
+
+ return 0;
}
-static int64_t dumpNtbOfDbByThreads(
- SDbInfo *dbInfo,
- int64_t ntbCount)
+static int64_t dumpCreateSTableClauseOfDb(
+ SDbInfo *dbInfo, FILE *fp)
{
- if (ntbCount <= 0) {
+ TAOS *taos = taos_connect(g_args.host,
+ g_args.user, g_args.password, dbInfo->name, g_args.port);
+ if (NULL == taos) {
+ errorPrint(
+ "Failed to connect to TDengine server %s by specified database %s\n",
+ g_args.host, dbInfo->name);
return 0;
}
- int threads = g_args.thread_num;
+ TAOS_ROW row;
+ char command[COMMAND_SIZE] = {0};
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
+ sprintf(command, "SHOW %s.STABLES", dbInfo->name);
- assert(threads);
- int64_t b = ntbCount % threads;
-
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- assert(pids);
- assert(infos);
-
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
+ TAOS_RES* res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ exit(-1);
+ }
- return -1;
+ int64_t superTblCnt = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ if (0 == dumpStableClasuse(taos, dbInfo,
+ row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
+ superTblCnt ++;
}
-
- pThreadInfo->threadIndex = i;
- pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
- pThreadInfo->tableFrom = (i==0)?0:
- ((threadInfo *)(infos + i - 1))->tableFrom +
- ((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
}
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
+ taos_free_result(res);
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- taos_close(pThreadInfo->taos);
- }
+ fprintf(g_fpOfResult,
+ "# super table counter: %"PRId64"\n",
+ superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- free(pids);
- free(infos);
+ taos_close(taos);
- return 0;
+ return superTblCnt;
}
-static int64_t getNtbCountOfStb(char *dbName, char *stbName)
-{
- TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
- dbName, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return -1;
- }
+static void dumpCreateDbClause(
+ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- int64_t count = 0;
+ char *pstr = sqlstr;
+ pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
+ if (isDumpProperty) {
+ pstr += sprintf(pstr,
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days,
+ dbInfo->keeplist,
+ dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
+ dbInfo->fsync,
+ dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
+ }
- char command[COMMAND_SIZE];
+ pstr += sprintf(pstr, ";");
+ fprintf(fp, "%s\n\n", sqlstr);
+}
- sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
+static FILE* openDumpInFile(char *fptr) {
+ wordexp_t full_path;
- TAOS_RES *res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
+ if (wordexp(fptr, &full_path, 0) != 0) {
+ errorPrint("illegal file name: %s\n", fptr);
+ return NULL;
}
- TAOS_ROW row = NULL;
+ char *fname = full_path.we_wordv[0];
- if ((row = taos_fetch_row(res)) != NULL) {
- count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
+ FILE *f = NULL;
+ if ((fname) && (strlen(fname) > 0)) {
+ f = fopen(fname, "r");
+ if (f == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, fname);
+ }
}
- taos_close(taos);
- return count;
+ wordfree(&full_path);
+ return f;
}
-static int64_t dumpNtbOfStbByThreads(
- SDbInfo *dbInfo, char *stbName)
+static uint64_t getFilesNum(char *ext)
{
- int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
+ uint64_t count = 0;
- if (ntbCount <= 0) {
- return 0;
- }
-
- int threads = g_args.thread_num;
-
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
+ int namelen, extlen;
+ struct dirent *pDirent;
+ DIR *pDir;
- assert(threads);
- int64_t b = ntbCount % threads;
+ extlen = strlen(ext);
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- assert(pids);
- assert(infos);
+ bool isSql = (0 == strcmp(ext, "sql"));
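+    // dbs.sql carries the schema dump and is restored separately, so it is excluded from the data-file count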
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
+ pDir = opendir(g_args.inpath);
+ if (pDir != NULL) {
+ while ((pDirent = readdir(pDir)) != NULL) {
+ namelen = strlen (pDirent->d_name);
- return -1;
+ if (namelen > extlen) {
+ if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
+ if (isSql) {
+ if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
+ continue;
+ }
+ }
+ verbosePrint("%s found\n", pDirent->d_name);
+ count ++;
+ }
+ }
}
-
- pThreadInfo->threadIndex = i;
- pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
- pThreadInfo->tableFrom = (i==0)?0:
- ((threadInfo *)(infos + i - 1))->tableFrom +
- ((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- strcpy(pThreadInfo->stbName, stbName);
- pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
- }
-
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
+ closedir (pDir);
}
- int64_t records = 0;
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- records += pThreadInfo->rowsOfDumpOut;
- taos_close(pThreadInfo->taos);
- }
-
- free(pids);
- free(infos);
-
- return records;
+ debugPrint("%"PRId64" .%s files found!\n", count, ext);
+ return count;
}
-static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp)
+static void freeFileList(char **fileList, int64_t count)
{
- uint64_t sizeOfTableDes =
- (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
- if (NULL == tableDes) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, sizeOfTableDes);
- exit(-1);
- }
-
- int colCount = getTableDes(dbInfo->name,
- stbName, tableDes, true);
-
- if (colCount < 0) {
- free(tableDes);
- errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
- __func__, __LINE__, stbName);
- exit(-1);
+ for (int64_t i = 0; i < count; i++) {
+ tfree(fileList[i]);
}
-
- dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
- free(tableDes);
-
- return 0;
+ tfree(fileList);
}
-static int64_t dumpCreateSTableClauseOfDb(
- SDbInfo *dbInfo, FILE *fp)
+static void createDumpinList(char *ext, int64_t count)
{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
- }
-
- TAOS_ROW row;
- char command[COMMAND_SIZE] = {0};
-
- sprintf(command, "SHOW %s.STABLES", dbInfo->name);
+ bool isSql = (0 == strcmp(ext, "sql"));
- TAOS_RES* res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
+ if (isSql) {
+ g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *));
+ assert(g_tsDumpInSqlFiles);
- int64_t superTblCnt = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
- superTblCnt ++;
+ for (int64_t i = 0; i < count; i++) {
+ g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ assert(g_tsDumpInSqlFiles[i]);
}
}
+#ifdef AVRO_SUPPORT
+ else {
+ g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *));
+ assert(g_tsDumpInAvroFiles);
- taos_free_result(res);
+ for (int64_t i = 0; i < count; i++) {
+ g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ assert(g_tsDumpInAvroFiles[i]);
+ }
- fprintf(g_fpOfResult,
- "# super table counter: %"PRId64"\n",
- superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+ }
+#endif
- taos_close(taos);
+ int namelen, extlen;
+ struct dirent *pDirent;
+ DIR *pDir;
+
+ extlen = strlen(ext);
+
+ count = 0;
+ pDir = opendir(g_args.inpath);
+ if (pDir != NULL) {
+ while ((pDirent = readdir(pDir)) != NULL) {
+ namelen = strlen (pDirent->d_name);
+
+ if (namelen > extlen) {
+ if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
+ verbosePrint("%s found\n", pDirent->d_name);
+ if (isSql) {
+ if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
+ continue;
+ }
+ strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
+ }
+#ifdef AVRO_SUPPORT
+ else {
+ strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
+ }
+#endif
+ }
+ }
+ }
+ closedir (pDir);
+ }
- return superTblCnt;
+ debugPrint("%"PRId64" .%s files filled to list!\n", count, ext);
}
-static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
-{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
- }
+#ifdef AVRO_SUPPORT
- char command[COMMAND_SIZE];
- TAOS_RES *result;
- int32_t code;
+static int convertTbDesToJson(
+ char *dbName, char *tbName, TableDef *tableDes, int colCount,
+ char **jsonSchema)
+{
+ // {
+ // "type": "record",
+ // "name": "dbname.tbname",
+ // "fields": [
+ // {
+ // "name": "col0 name",
+ // "type": "long"
+ // },
+ // {
+ // "name": "col1 name",
+ // "type": "int"
+ // },
+ // {
+ // "name": "col2 name",
+ // "type": "float"
+ // },
+ // {
+ // "name": "col3 name",
+ // "type": "boolean"
+ // },
+ // ...
+ // {
+ // "name": "coln name",
+ // "type": "string"
+ // }
+ // ]
+ // }
+ *jsonSchema = (char *)calloc(1,
+ 17 + TSDB_DB_NAME_LEN /* dbname section */
+ + 17 /* type: record */
+ + 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ + 10 /* fields section */
+ + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
+ if (*jsonSchema == NULL) {
+ errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
+ return -1;
+ }
- sprintf(command, "USE %s", dbInfo->name);
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("invalid database %s, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
+ char *pstr = *jsonSchema;
+ pstr += sprintf(pstr,
+ "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [",
+ dbName, tbName);
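+    // map TDengine column types onto Avro primitives: tinyint/smallint -> "int",
+    // bigint/timestamp -> "long", binary -> "string", nchar -> "bytes", bool -> "boolean";
+    // other types (int, float, double) pass through lowercased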
+ for (int i = 0; i < colCount; i ++) {
+ if (0 == i) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else {
+ if (strcasecmp(tableDes->cols[i].type, "binary") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "string");
+ } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "bytes");
+ } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "boolean");
+ } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "int");
+ } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "int");
+ } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field,
+ strtolower(tableDes->cols[i].type, tableDes->cols[i].type));
+ }
+ }
+ if ((i != (colCount -1))
+ && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
+ pstr += sprintf(pstr, "},");
+ } else {
+ pstr += sprintf(pstr, "}");
+ break;
+ }
}
- sprintf(command, "SHOW TABLES");
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("Failed to show %s\'s tables, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
+ pstr += sprintf(pstr, "]}");
+
+ debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema);
+
+ return 0;
+}
+
+static void print_json_indent(int indent) {
+ int i;
+ for (i = 0; i < indent; i++) {
+ putchar(' ');
}
+}
- g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
+const char *json_plural(size_t count) { return count == 1 ? "" : "s"; }
- TAOS_ROW row;
- int64_t count = 0;
- while(NULL != (row = taos_fetch_row(result))) {
- debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
- __func__, __LINE__,
- count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- tstrncpy(((TableInfo *)(g_tablesList + count))->name,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
- char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
- if (stbName) {
- tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
- (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
- ((TableInfo *)(g_tablesList + count))->belongStb = true;
- }
- count ++;
+static void print_json_object(json_t *element, int indent) {
+ size_t size;
+ const char *key;
+ json_t *value;
+
+ print_json_indent(indent);
+ size = json_object_size(element);
+
+ printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size));
+ json_object_foreach(element, key, value) {
+ print_json_indent(indent + 2);
+ printf("JSON Key: \"%s\"\n", key);
+ print_json_aux(value, indent + 2);
}
- taos_close(taos);
+}
- int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
+static void print_json_array(json_t *element, int indent) {
+ size_t i;
+ size_t size = json_array_size(element);
+ print_json_indent(indent);
- free(g_tablesList);
- g_tablesList = NULL;
+ printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size));
+ for (i = 0; i < size; i++) {
+ print_json_aux(json_array_get(element, i), indent + 2);
+ }
+}
- return records;
+static void print_json_string(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON String: \"%s\"\n", json_string_value(element));
}
-static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
-{
- dumpCreateDbClause(dbInfo, g_args.with_property, fp);
+static void print_json_integer(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element));
+}
- fprintf(g_fpOfResult, "\n#### database: %s\n",
- dbInfo->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+static void print_json_real(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON Real: %f\n", json_real_value(element));
+}
- dumpCreateSTableClauseOfDb(dbInfo, fp);
+static void print_json_true(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON True\n");
+}
- return dumpNTablesOfDb(dbInfo);
+static void print_json_false(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON False\n");
}
-static int dumpOut() {
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
+static void print_json_null(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON Null\n");
+}
- TAOS_ROW row;
- FILE *fp = NULL;
- int32_t count = 0;
+static void print_json_aux(json_t *element, int indent)
+{
+ switch(json_typeof(element)) {
+ case JSON_OBJECT:
+ print_json_object(element, indent);
+ break;
- char tmpBuf[4096] = {0};
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
- } else {
- sprintf(tmpBuf, "dbs.sql");
- }
+ case JSON_ARRAY:
+ print_json_array(element, indent);
+ break;
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
+ case JSON_STRING:
+ print_json_string(element, indent);
+ break;
+
+ case JSON_INTEGER:
+ print_json_integer(element, indent);
+ break;
+
+ case JSON_REAL:
+ print_json_real(element, indent);
+ break;
+
+ case JSON_TRUE:
+ print_json_true(element, indent);
+ break;
+
+ case JSON_FALSE:
+ print_json_false(element, indent);
+ break;
+
+ case JSON_NULL:
+ print_json_null(element, indent);
+ break;
+
+ default:
+ fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element));
}
+}
- g_args.dumpDbCount = getDumpDbCount();
- debugPrint("%s() LN%d, dump db count: %d\n",
- __func__, __LINE__, g_args.dumpDbCount);
+static void print_json(json_t *root) { print_json_aux(root, 0); }
- if (0 == g_args.dumpDbCount) {
- errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
- fclose(fp);
- return -1;
+static json_t *load_json(char *jsonbuf)
+{
+ json_t *root;
+ json_error_t error;
+
+ root = json_loads(jsonbuf, 0, &error);
+
+ if (root) {
+ return root;
+ } else {
+ fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
+ return NULL;
}
+}
- g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
- if (g_dbInfos == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
+static RecordSchema *parse_json_to_recordschema(json_t *element)
+{
+ RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
+ assert(recordSchema);
+
+ if (JSON_OBJECT != json_typeof(element)) {
+ fprintf(stderr, "%s() LN%d, json passed is not an object\n",
__func__, __LINE__);
- goto _exit_failure;
+ return NULL;
}
- char command[COMMAND_SIZE];
+ const char *key;
+ json_t *value;
+
+ json_object_foreach(element, key, value) {
+ if (0 == strcmp(key, "name")) {
+ tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
+ } else if (0 == strcmp(key, "fields")) {
+ if (JSON_ARRAY == json_typeof(value)) {
+
+ size_t i;
+ size_t size = json_array_size(value);
+
+ verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
+ __func__, __LINE__,
+ (long long)size, json_plural(size));
+
+ recordSchema->num_fields = size;
+ recordSchema->fields = malloc(sizeof(FieldStruct) * size);
+ assert(recordSchema->fields);
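+                // fields is declared char*, so element i is located manually at byte
+                // offset i * sizeof(FieldStruct) instead of via typed array indexing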
+
+ for (i = 0; i < size; i++) {
+ FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
+ json_t *arr_element = json_array_get(value, i);
+ const char *ele_key;
+ json_t *ele_value;
+
+ json_object_foreach(arr_element, ele_key, ele_value) {
+ if (0 == strcmp(ele_key, "name")) {
+ tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
+ } else if (0 == strcmp(ele_key, "type")) {
+ if (JSON_STRING == json_typeof(ele_value)) {
+ tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
+ } else if (JSON_OBJECT == json_typeof(ele_value)) {
+ const char *obj_key;
+ json_t *obj_value;
+
+ json_object_foreach(ele_value, obj_key, obj_value) {
+ if (0 == strcmp(obj_key, "type")) {
+ if (JSON_STRING == json_typeof(obj_value)) {
+ tstrncpy(field->type,
+ json_string_value(obj_value), TYPE_NAME_LEN-1);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ fprintf(stderr, "%s() LN%d, fields have no array\n",
+ __func__, __LINE__);
+ return NULL;
+ }
- /* Connect to server */
- taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- goto _exit_failure;
+ break;
+ }
}
- /* --------------------------------- Main Code -------------------------------- */
- /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
- /* */
- dumpCharset(fp);
+ return recordSchema;
+}
- sprintf(command, "show databases");
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
+static void freeRecordSchema(RecordSchema *recordSchema)
+{
+ if (recordSchema) {
+ if (recordSchema->fields) {
+ free(recordSchema->fields);
+ }
+ free(recordSchema);
+ }
+}
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- goto _exit_failure;
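+/*
+ * Write an entire query result set into one Avro file, using the table
+ * schema that has already been rendered as JSON.
+ */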
+static int64_t writeResultToAvro(
+ char *avroFilename,
+ char *jsonSchema,
+ TAOS_RES *res)
+{
+ avro_schema_t schema;
+ if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) {
+        errorPrint("%s() LN%d, Unable to parse:\n%s\nto schema\nerror message: %s\n",
+ __func__, __LINE__, jsonSchema, avro_strerror());
+ exit(EXIT_FAILURE);
}
- TAOS_FIELD *fields = taos_fetch_fields(result);
+ json_t *json_root = load_json(jsonSchema);
+ debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
- while ((row = taos_fetch_row(result)) != NULL) {
- // sys database name : 'log', but subsequent version changed to 'log'
- if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- && (!g_args.allow_sys)) {
- continue;
+ RecordSchema *recordSchema;
+ if (json_root) {
+ if (g_args.debug_print || g_args.verbose_print) {
+ print_json(json_root);
}
- if (g_args.databases) { // input multi dbs
- if (inDatabasesSeq(
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
- continue;
- }
- } else if (!g_args.all_databases) { // only input one db
- if (strncasecmp(g_args.arg_list[0],
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
- continue;
+ recordSchema = parse_json_to_recordschema(json_root);
+ if (NULL == recordSchema) {
+ fprintf(stderr, "Failed to parse json to recordschema\n");
+ exit(EXIT_FAILURE);
}
- g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (g_dbInfos[count] == NULL) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
- goto _exit_failure;
- }
+ json_decref(json_root);
+ } else {
+        errorPrint("json:\n%s\ncan't be parsed by jansson\n", jsonSchema);
+ exit(EXIT_FAILURE);
+ }
- okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
- tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- min(TSDB_DB_NAME_LEN,
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
- if (g_args.with_property) {
- g_dbInfos[count]->ntables =
- *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- g_dbInfos[count]->vgroups =
- *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- g_dbInfos[count]->replica =
- *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- g_dbInfos[count]->quorum =
- *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- g_dbInfos[count]->days =
- *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ avro_file_writer_t db;
- tstrncpy(g_dbInfos[count]->keeplist,
- (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
- //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
- //g_dbInfos[count]->daysToKeep1;
- //g_dbInfos[count]->daysToKeep2;
- g_dbInfos[count]->cache =
- *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- g_dbInfos[count]->blocks =
- *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- g_dbInfos[count]->minrows =
- *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- g_dbInfos[count]->maxrows =
- *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- g_dbInfos[count]->wallevel =
- *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- g_dbInfos[count]->fsync =
- *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- g_dbInfos[count]->comp =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- g_dbInfos[count]->cachelast =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+ int rval = avro_file_writer_create_with_codec
+ (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0);
+ if (rval) {
+ errorPrint("There was an error creating %s. reason: %s\n",
+ avroFilename, avro_strerror());
+ exit(EXIT_FAILURE);
+ }
- tstrncpy(g_dbInfos[count]->precision,
- (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- DB_PRECISION_LEN);
- g_dbInfos[count]->update =
- *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- }
- count++;
+ TAOS_ROW row = NULL;
- if (g_args.databases) {
- if (count > g_args.dumpDbCount)
- break;
- } else if (!g_args.all_databases) {
- if (count >= 1)
- break;
- }
- }
+ int numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- if (count == 0) {
- errorPrint("%d databases valid to dump\n", count);
- goto _exit_failure;
- }
+ avro_value_iface_t *wface =
+ avro_generic_class_from_schema(schema);
- taos_close(taos);
+ avro_value_t record;
+ avro_generic_value_new(wface, &record);
- if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
- for (int i = 0; i < count; i++) {
- int64_t records = 0;
- records = dumpWholeDatabase(g_dbInfos[i], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[i]->name);
- g_totalDumpOutRows += records;
+ int64_t count = 0;
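+    /* For every fetched row, look the Avro field up by column name and
+     * set it according to the TDengine column type. */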
+ while ((row = taos_fetch_row(res)) != NULL) {
+ avro_value_t value;
+
+ for (int col = 0; col < numFields; col++) {
+ if (0 != avro_value_get_by_name(
+ &record, fields[col].name, &value, NULL)) {
+                errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) failed\n",
+ __func__, __LINE__, fields[col].name);
+ continue;
}
- }
- } else {
- if (1 == g_args.arg_list_len) {
- int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[0]->name);
- g_totalDumpOutRows += records;
+
+ int len;
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ avro_value_set_boolean(&value,
+ ((((int32_t)(*((char *)row[col])))==1)?1:0));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ avro_value_set_int(&value, *((int8_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ avro_value_set_int(&value, *((int16_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ avro_value_set_int(&value, *((int32_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ avro_value_set_long(&value, *((int64_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ avro_value_set_float(&value, GET_FLOAT_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ avro_value_set_double(&value, GET_DOUBLE_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ avro_value_set_string(&value, (char *)row[col]);
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ len = strlen((char*)row[col]);
+ avro_value_set_bytes(&value, (void*)(row[col]),len);
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ avro_value_set_long(&value, *((int64_t *)row[col]));
+ break;
+
+ default:
+ break;
}
+ }
+
+ if (0 != avro_file_writer_append_value(db, &record)) {
+ errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n",
+ __func__, __LINE__,
+ avro_strerror());
} else {
- dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ count ++;
}
+ }
- int superTblCnt = 0 ;
- for (int i = 1; g_args.arg_list[i]; i++) {
- TableRecordInfo tableRecordInfo;
+ avro_value_decref(&record);
+ avro_value_iface_decref(wface);
+ freeRecordSchema(recordSchema);
+ avro_file_writer_close(db);
+ avro_schema_decref(schema);
- if (getTableRecordInfo(g_dbInfos[0]->name,
- g_args.arg_list[i],
- &tableRecordInfo) < 0) {
- errorPrint("input the invalid table %s\n",
- g_args.arg_list[i]);
- continue;
- }
+ return count;
+}
- int64_t records = 0;
- if (tableRecordInfo.isStb) { // dump all table of this stable
- int ret = dumpStableClasuse(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- if (ret >= 0) {
- superTblCnt++;
- records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
- }
- } else if (tableRecordInfo.belongStb){
- dumpStableClasuse(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- records = dumpNormalTableBelongStb(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- g_args.arg_list[i]);
- } else {
- records = dumpNormalTableWithoutStb(g_dbInfos[0], g_args.arg_list[i]);
- }
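+/*
+ * Free the per-row numeric buffers. BINARY/NCHAR buffers point into the
+ * Avro value itself, so they must not be freed here.
+ */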
+void freeBindArray(char *bindArray, int onlyCol)
+{
+ TAOS_BIND *bind;
- if (records >= 0) {
- okPrint("table: %s dumped\n", g_args.arg_list[i]);
- g_totalDumpOutRows += records;
- }
+ for (int j = 0; j < onlyCol; j++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j));
+ if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type)
+ && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) {
+ tfree(bind->buffer);
}
}
+}
- /* Close the handle and return */
- fclose(fp);
- taos_free_result(result);
- freeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
- return 0;
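+/*
+ * Restore one .avro file: recover the writer schema from the file, use
+ * its namespace as the database name, and insert the rows back through
+ * the parameter-binding statement API.
+ */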
+static int dumpInOneAvroFile(char* fcharset,
+ char* encode, char *avroFilepath)
+{
+ debugPrint("avroFilepath: %s\n", avroFilepath);
-_exit_failure:
- fclose(fp);
- taos_close(taos);
- taos_free_result(result);
- freeDbInfos();
- errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
- return -1;
-}
+ avro_file_reader_t reader;
-static int getTableDes(
- char* dbName, char *table,
- TableDef *tableDes, bool isSuperTable) {
- TAOS_ROW row = NULL;
- TAOS_RES* res = NULL;
- int colCount = 0;
+ if(avro_file_reader(avroFilepath, &reader)) {
+ fprintf(stderr, "Unable to open avro file %s: %s\n",
+ avroFilepath, avro_strerror());
+ return -1;
+ }
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbName, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbName);
+ int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4;
+ char *jsonbuf = calloc(1, buf_len);
+ assert(jsonbuf);
+
+    avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);
+
+ avro_schema_t schema;
+ schema = avro_file_reader_get_writer_schema(reader);
+ avro_schema_to_json(schema, jsonwriter);
+
+ if (0 == strlen(jsonbuf)) {
+ errorPrint("Failed to parse avro file: %s schema. reason: %s\n",
+ avroFilepath, avro_strerror());
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
return -1;
}
+ debugPrint("Schema:\n %s\n", jsonbuf);
- char sqlstr[COMMAND_SIZE];
- sprintf(sqlstr, "describe %s.%s;", dbName, table);
+ json_t *json_root = load_json(jsonbuf);
+ debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
+ if (g_args.debug_print) {
+ print_json(json_root);
+ }
- res = taos_query(taos, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
+ const char *namespace = avro_schema_namespace((const avro_schema_t)schema);
+ debugPrint("Namespace: %s\n", namespace);
+
+ TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ namespace, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ return -1;
+ }
+
+ TAOS_STMT *stmt = taos_stmt_init(taos);
+ if (NULL == stmt) {
taos_close(taos);
+ errorPrint("%s() LN%d, stmt init failed! reason: %s\n",
+ __func__, __LINE__, taos_errstr(NULL));
return -1;
}
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ RecordSchema *recordSchema = parse_json_to_recordschema(json_root);
+ if (NULL == recordSchema) {
+ errorPrint("Failed to parse json to recordschema. reason: %s\n",
+ avro_strerror());
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
+ return -1;
+ }
+ json_decref(json_root);
- tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
- while ((row = taos_fetch_row(res)) != NULL) {
- tstrncpy(tableDes->cols[colCount].field,
- (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
- min(TSDB_COL_NAME_LEN + 1,
- fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
- tstrncpy(tableDes->cols[colCount].type,
- (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
- min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
- tableDes->cols[colCount].length =
- *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
- tstrncpy(tableDes->cols[colCount].note,
- (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
- min(COL_NOTE_LEN,
- fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
- colCount++;
+ TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
+ + sizeof(ColDes) * TSDB_MAX_COLUMNS);
+
+ int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false);
+
+ if (allColCount < 0) {
+ errorPrint("%s() LN%d, failed to get table[%s] schema\n",
+ __func__,
+ __LINE__,
+ recordSchema->name);
+ free(tableDes);
+ freeRecordSchema(recordSchema);
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
+ return -1;
}
- taos_free_result(res);
- res = NULL;
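+    /* Build "INSERT INTO ? VALUES(?,...)" with one placeholder per
+     * non-TAG column. */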
+ char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
+ assert(stmtBuffer);
+ char *pstr = stmtBuffer;
+ pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
- if (isSuperTable) {
- return colCount;
+ int onlyCol = 1; // at least timestamp
+ for (int col = 1; col < allColCount; col++) {
+ if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue;
+ pstr += sprintf(pstr, ",?");
+ onlyCol ++;
}
+ pstr += sprintf(pstr, ")");
- // if child-table have tag, using select tagName from table to get tagValue
- for (int i = 0 ; i < colCount; i++) {
- if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
+ if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) {
+ errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n",
+ taos_stmt_errstr(stmt));
- sprintf(sqlstr, "select %s from %s.%s",
- tableDes->cols[i].field, dbName, table);
+ free(stmtBuffer);
+ free(tableDes);
+ freeRecordSchema(recordSchema);
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
+ return -1;
+ }
- res = taos_query(taos, sqlstr);
- code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
- }
+ if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) {
+ errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n",
+ recordSchema->name, taos_stmt_errstr(stmt));
- fields = taos_fetch_fields(res);
+ free(stmtBuffer);
+        free(tableDes);
+        freeRecordSchema(recordSchema);
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
+ return -1;
+ }
+
+ avro_value_iface_t *value_class = avro_generic_class_from_schema(schema);
+ avro_value_t value;
+ avro_generic_value_new(value_class, &value);
+
+ char *bindArray =
+ malloc(sizeof(TAOS_BIND) * onlyCol);
+ assert(bindArray);
+
+ int success = 0;
+ int failed = 0;
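+    /* Each Avro record becomes one bound row; field 0 is always the
+     * timestamp column. */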
+ while(!avro_file_reader_read_value(reader, &value)) {
+ memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol);
+ TAOS_BIND *bind;
+
+ for (int i = 0; i < recordSchema->num_fields; i++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
+
+ avro_value_t field_value;
+
+ FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
+
+ if (0 == i) {
+ int64_t *ts = malloc(sizeof(int64_t));
+ assert(ts);
+
+ avro_value_get_by_name(&value, field->name, &field_value, NULL);
+ avro_value_get_long(&field_value, ts);
+
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = ts;
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ } else if (0 == avro_value_get_by_name(
+ &value, field->name, &field_value, NULL)) {
+ if (0 == strcasecmp(tableDes->cols[i].type, "int")) {
+ int32_t *n32 = malloc(sizeof(int32_t));
+ assert(n32);
+
+ avro_value_get_int(&field_value, n32);
+ debugPrint("%d | ", *n32);
+ bind->buffer_type = TSDB_DATA_TYPE_INT;
+ bind->buffer_length = sizeof(int32_t);
+ bind->buffer = n32;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) {
+ int32_t *n8 = malloc(sizeof(int32_t));
+ assert(n8);
+
+ avro_value_get_int(&field_value, n8);
+ debugPrint("%d | ", *n8);
+ bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = (int8_t *)n8;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) {
+ int32_t *n16 = malloc(sizeof(int32_t));
+ assert(n16);
+
+ avro_value_get_int(&field_value, n16);
+ debugPrint("%d | ", *n16);
+ bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
+ bind->buffer_length = sizeof(int16_t);
+ bind->buffer = (int32_t*)n16;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) {
+ int64_t *n64 = malloc(sizeof(int64_t));
+ assert(n64);
+
+ avro_value_get_long(&field_value, n64);
+ debugPrint("%"PRId64" | ", *n64);
+ bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = n64;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) {
+ int64_t *n64 = malloc(sizeof(int64_t));
+ assert(n64);
+
+ avro_value_get_long(&field_value, n64);
+ debugPrint("%"PRId64" | ", *n64);
+ bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
+ bind->buffer_length = sizeof(int64_t);
+ bind->buffer = n64;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) {
+ float *f = malloc(sizeof(float));
+ assert(f);
+
+ avro_value_get_float(&field_value, f);
+ debugPrint("%f | ", *f);
+ bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
+ bind->buffer_length = sizeof(float);
+ bind->buffer = f;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) {
+ double *dbl = malloc(sizeof(double));
+ assert(dbl);
+
+ avro_value_get_double(&field_value, dbl);
+ debugPrint("%f | ", *dbl);
+ bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+ bind->buffer_length = sizeof(double);
+ bind->buffer = dbl;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
+ size_t size;
+
+ char *buf = NULL;
+ avro_value_get_string(&field_value, (const char **)&buf, &size);
+ debugPrint("%s | ", (char *)buf);
+ bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+ bind->buffer_length = tableDes->cols[i].length;
+ bind->buffer = buf;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
+ size_t bytessize;
+ void *bytesbuf = NULL;
+
+ avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
+ debugPrint("%s | ", (char*)bytesbuf);
+ bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+ bind->buffer_length = tableDes->cols[i].length;
+ bind->buffer = bytesbuf;
+ } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
+ int32_t *bl = malloc(sizeof(int32_t));
+ assert(bl);
+
+ avro_value_get_boolean(&field_value, bl);
+ debugPrint("%s | ", (*bl)?"true":"false");
+ bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+ bind->buffer_length = sizeof(int8_t);
+ bind->buffer = (int8_t*)bl;
+ }
+
+ bind->length = &bind->buffer_length;
+ bind->is_null = NULL;
+ }
- row = taos_fetch_row(res);
- if (NULL == row) {
- errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
- __func__, __LINE__, sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
}
+ debugPrint("%s", "\n");
- if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
- sprintf(tableDes->cols[i].note, "%s", "NUL");
- sprintf(tableDes->cols[i].value, "%s", "NULL");
- taos_free_result(res);
- res = NULL;
+ if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
+ errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ freeBindArray(bindArray, onlyCol);
+ failed --;
+ continue;
+ }
+ if (0 != taos_stmt_add_batch(stmt)) {
+            errorPrint("%s() LN%d stmt_add_batch() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ freeBindArray(bindArray, onlyCol);
+ failed --;
continue;
}
- int32_t* length = taos_fetch_lengths(res);
-
- //int32_t* length = taos_fetch_lengths(tmpResult);
- switch (fields[0].type) {
- case TSDB_DATA_TYPE_BOOL:
- sprintf(tableDes->cols[i].value, "%d",
- ((((int32_t)(*((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_INT:
- sprintf(tableDes->cols[i].value, "%d",
- *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- sprintf(tableDes->cols[i].value, "%" PRId64 "",
- *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- sprintf(tableDes->cols[i].value, "%f",
- GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- sprintf(tableDes->cols[i].value, "%f",
- GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- memset(tableDes->cols[i].value, 0,
- sizeof(tableDes->cols[i].value));
- int len = strlen((char *)row[0]);
- // FIXME for long value
- if (len < (COL_VALUEBUF_LEN - 2)) {
- converStringToReadable(
- (char *)row[0],
- length[0],
- tableDes->cols[i].value,
- len);
- } else {
- tableDes->cols[i].var_value = calloc(1, len * 2);
- if (tableDes->cols[i].var_value == NULL) {
- errorPrint("%s() LN%d, memory alalocation failed!\n",
- __func__, __LINE__);
- taos_free_result(res);
- return -1;
- }
- converStringToReadable((char *)row[0],
- length[0],
- (char *)(tableDes->cols[i].var_value), len);
- }
- break;
+ freeBindArray(bindArray, onlyCol);
- case TSDB_DATA_TYPE_NCHAR:
- {
- memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
- convertNCharToReadable((char *)row[TSDB_SHOW_TABLES_NAME_INDEX], length[0], tbuf, COL_NOTE_LEN);
- sprintf(tableDes->cols[i].value, "%s", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-#if 0
- if (!g_args.mysqlFlag) {
- sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
- }
-#endif
- break;
- default:
- break;
- }
+ success ++;
+ continue;
+ }
- taos_free_result(res);
+ if (0 != taos_stmt_execute(stmt)) {
+        errorPrint("%s() LN%d stmt_execute() failed! reason: %s\n",
+ __func__, __LINE__, taos_stmt_errstr(stmt));
+ failed = success;
}
+ avro_value_decref(&value);
+ avro_value_iface_decref(value_class);
+
+ tfree(bindArray);
+
+ tfree(stmtBuffer);
+ tfree(tableDes);
+
+ freeRecordSchema(recordSchema);
+ avro_schema_decref(schema);
+ avro_file_reader_close(reader);
+ avro_writer_free(jsonwriter);
+
+ tfree(jsonbuf);
+
+ taos_stmt_close(stmt);
taos_close(taos);
- return colCount;
+
+ if (failed < 0)
+ return failed;
+ return success;
}
-static void dumpCreateDbClause(
- SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
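+/* Worker thread: restore the slice of .avro files assigned to it. */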
+static void* dumpInAvroWorkThreadFp(void *arg)
+{
+ threadInfo *pThread = (threadInfo*)arg;
+ setThreadName("dumpInAvroWorkThrd");
+ verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
+ pThread->threadIndex, pThread->count, pThread->from);
+
+ for (int64_t i = 0; i < pThread->count; i++) {
+ char avroFile[MAX_PATH_LEN];
+ sprintf(avroFile, "%s/%s", g_args.inpath,
+ g_tsDumpInAvroFiles[pThread->from + i]);
+
+ if (0 == dumpInOneAvroFile(g_tsCharset,
+ g_args.encode,
+ avroFile)) {
+            okPrint("[%d] Successfully dumped in file: %s\n",
+ pThread->threadIndex, avroFile);
+ }
+ }
- char *pstr = sqlstr;
- pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
- if (isDumpProperty) {
- pstr += sprintf(pstr,
- "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->replica, dbInfo->quorum, dbInfo->days,
- dbInfo->keeplist,
- dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
- dbInfo->fsync,
- dbInfo->cachelast,
- dbInfo->comp, dbInfo->precision, dbInfo->update);
+ return NULL;
+}
+
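+/*
+ * Spread the .avro files found in the input path across thread_num
+ * threads; if there are fewer files than threads, shrink the pool.
+ */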
+static int64_t dumpInAvroWorkThreads()
+{
+ int64_t ret = 0;
+
+ int32_t threads = g_args.thread_num;
+
+ uint64_t avroFileCount = getFilesNum("avro");
+ if (0 == avroFileCount) {
+ debugPrint("No .avro file found in %s\n", g_args.inpath);
+ return 0;
}
- pstr += sprintf(pstr, ";");
- fprintf(fp, "%s\n\n", sqlstr);
+ createDumpinList("avro", avroFileCount);
+
+ threadInfo *pThread;
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = (threadInfo *)calloc(
+ threads, sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
+
+ int64_t a = avroFileCount / threads;
+ if (a < 1) {
+ threads = avroFileCount;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = avroFileCount % threads;
+ }
+
+ int64_t from = 0;
+
+ for (int32_t t = 0; t < threads; ++t) {
+ pThread = infos + t;
+ pThread->threadIndex = t;
+
+ pThread->from = from;
+        pThread->count = (t < b) ? a + 1 : a;
+        from += pThread->count;
+ verbosePrint(
+ "Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
+ t, pThread->count, pThread->from);
+
+ if (pthread_create(pids + t, NULL,
+ dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread[%d] failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ for (int t = 0; t < threads; ++t) {
+ pthread_join(pids[t], NULL);
+ }
+
+ free(infos);
+ free(pids);
+
+ freeFileList(g_tsDumpInAvroFiles, avroFileCount);
+
+ return ret;
}
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
- FILE *fp, char* dbName) {
- int counter = 0;
- int count_temp = 0;
- char sqlstr[COMMAND_SIZE];
+#endif /* AVRO_SUPPORT */
- char* pstr = sqlstr;
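+/*
+ * The non-Avro dump-out path: render rows as batched INSERT statements,
+ * ending a batch when data_batch rows are buffered or the SQL buffer is
+ * nearly full.
+ */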
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+ int64_t totalRows = 0;
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
- dbName, tableDes->name);
+ int32_t sql_buf_len = g_args.max_sql_len;
+ char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
+ if (tmpBuffer == NULL) {
+        errorPrint("failed to allocate %d bytes of memory\n", sql_buf_len + 128);
+ return -1;
+ }
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
+ char *pstr = tmpBuffer;
- if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
+ TAOS_ROW row = NULL;
+ int rowFlag = 0;
+ int64_t lastRowsPrint = 5000000;
+ int count = 0;
+
+ int numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ int32_t curr_sqlstr_len = 0;
+ int32_t total_sqlstr_len = 0;
+
+ while ((row = taos_fetch_row(res)) != NULL) {
+ curr_sqlstr_len = 0;
+
+ int32_t* length = taos_fetch_lengths(res); // act len
+
+ if (count == 0) {
+ total_sqlstr_len = 0;
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "INSERT INTO %s.%s VALUES (", dbName, tbName);
} else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
+ if (g_args.mysqlFlag) {
+ if (0 == rowFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ rowFlag++;
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ }
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ }
}
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
- }
+ for (int col = 0; col < numFields; col++) {
+ if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+
+ if (row[col] == NULL) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
+ continue;
+ }
+
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ ((((int32_t)(*((char *)row[col])))==1)?1:0));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int8_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int16_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int32_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "%" PRId64 "",
+ *((int64_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
+ GET_FLOAT_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
+ GET_DOUBLE_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ converStringToReadable((char *)row[col], length[col],
+ tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ convertNCharToReadable((char *)row[col], length[col],
+ tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!g_args.mysqlFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "%" PRId64 "",
+ *(int64_t *)row[col]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[col]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s.%03d\'",
+ buf, (int)(ts % 1000));
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+
+ totalRows++;
+ count++;
+ fprintf(fp, "%s", tmpBuffer);
+
+ if (totalRows >= lastRowsPrint) {
+            printf(" %"PRId64 " rows already dumped out from %s.%s\n",
+ totalRows, dbName, tbName);
+ lastRowsPrint += 5000000;
+ }
+
+ total_sqlstr_len += curr_sqlstr_len;
+
+ if ((count >= g_args.data_batch)
+ || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
+ fprintf(fp, ";\n");
+ count = 0;
+ }
+ }
+
+ debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
+
+ fprintf(fp, "\n");
+ free(tmpBuffer);
+
+ return totalRows;
+}
+
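+/*
+ * Dump one table's rows inside [start_time, end_time].  Human-readable
+ * time strings, when given, take precedence over the raw epoch values.
+ */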
+static int64_t dumpTableData(FILE *fp, char *tbName,
+ char* dbName, int precision,
+ char *jsonSchema) {
+ int64_t totalRows = 0;
+
+ char sqlstr[1024] = {0};
+
+ int64_t start_time, end_time;
+ if (strlen(g_args.humanStartTime)) {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ g_args.humanStartTime, &start_time,
+ strlen(g_args.humanStartTime),
+ precision, 0)) {
+ errorPrint("Input %s, time format error!\n",
+ g_args.humanStartTime);
+ return -1;
+ }
+ } else {
+ start_time = g_args.start_time;
+ }
+
+ if (strlen(g_args.humanEndTime)) {
+ if (TSDB_CODE_SUCCESS != taosParseTime(
+ g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
+ precision, 0)) {
+ errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
+ return -1;
+ }
+ } else {
+ end_time = g_args.end_time;
+ }
+
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ dbName, tbName, start_time, end_time);
+
+ TAOS *taos = taos_connect(g_args.host,
+ g_args.user, g_args.password, dbName, g_args.port);
+ if (NULL == taos) {
+ errorPrint(
+ "Failed to connect to TDengine server %s by specified database %s\n",
+ g_args.host, dbName);
+ return -1;
+ }
+
+ TAOS_RES* res = taos_query(taos, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("failed to run command %s, reason: %s\n",
+ sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
+ }
+
+#ifdef AVRO_SUPPORT
+ if (g_args.avro) {
+ char avroFilename[MAX_PATH_LEN] = {0};
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(avroFilename, "%s/%s.%s.avro",
+ g_args.outpath, dbName, tbName);
+ } else {
+ sprintf(avroFilename, "%s.%s.avro",
+ dbName, tbName);
+ }
+
+ totalRows = writeResultToAvro(avroFilename, jsonSchema, res);
+ } else
+#endif
+ totalRows = writeResultToSql(res, fp, dbName, tbName);
+
+ taos_free_result(res);
+ taos_close(taos);
+ return totalRows;
+}
+
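+/*
+ * Dump one table: emit its schema first (a USING clause when it is the
+ * child of a super table), then its data unless schema-only is requested.
+ */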
+static int64_t dumpNormalTable(
+ TAOS *taos,
+ char *dbName,
+ char *stable,
+ char *tbName,
+ int precision,
+ FILE *fp
+ ) {
+ int colCount = 0;
+
+ TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
+ + sizeof(ColDes) * TSDB_MAX_COLUMNS);
+
+ if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
+ colCount = getTableDes(taos, dbName, tbName, tableDes, false);
+
+ if (colCount < 0) {
+ errorPrint("%s() LN%d, failed to get table[%s] schema\n",
+ __func__,
+ __LINE__,
+ tbName);
+ free(tableDes);
+ return -1;
+ }
+
+ // create child-table using super-table
+ dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
+ } else { // dump table definition
+ colCount = getTableDes(taos, dbName, tbName, tableDes, false);
+
+ if (colCount < 0) {
+ errorPrint("%s() LN%d, failed to get table[%s] schema\n",
+ __func__,
+ __LINE__,
+ tbName);
+ free(tableDes);
+ return -1;
+ }
+
+ // create normal-table or super-table
+ dumpCreateTableClause(tableDes, colCount, fp, dbName);
+ }
+
+ char *jsonSchema = NULL;
+#ifdef AVRO_SUPPORT
+ if (g_args.avro) {
+ if (0 != convertTbDesToJson(
+ dbName, tbName, tableDes, colCount, &jsonSchema)) {
+ errorPrint("%s() LN%d, convertTbDesToJson failed\n",
+ __func__,
+ __LINE__);
+ freeTbDes(tableDes);
+ return -1;
+ }
+ }
+#endif
+
+ int64_t totalRows = 0;
+ if (!g_args.schemaonly) {
+ totalRows = dumpTableData(fp, tbName, dbName, precision,
+ jsonSchema);
+ }
+
+ tfree(jsonSchema);
+ freeTbDes(tableDes);
+ return totalRows;
+}
+
+static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName)
+{
+ int64_t count = 0;
+
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ FILE *fp = NULL;
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.sql",
+ g_args.outpath, dbInfo->name, ntbName);
+ } else {
+ sprintf(tmpBuf, "%s.%s.sql",
+ dbInfo->name, ntbName);
+ }
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
+
+ count = dumpNormalTable(
+ taos,
+ dbInfo->name,
+ NULL,
+ ntbName,
+ getPrecisionByString(dbInfo->precision),
+ fp);
+ if (count > 0) {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+ fclose(fp);
+ return count;
+}
+
+static int64_t dumpNormalTableBelongStb(
+ TAOS *taos,
+ SDbInfo *dbInfo, char *stbName, char *ntbName)
+{
+ int64_t count = 0;
+
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ FILE *fp = NULL;
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.sql",
+ g_args.outpath, dbInfo->name, ntbName);
+ } else {
+ sprintf(tmpBuf, "%s.%s.sql",
+ dbInfo->name, ntbName);
+ }
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
+
+ count = dumpNormalTable(
+ taos,
+ dbInfo->name,
+ stbName,
+ ntbName,
+ getPrecisionByString(dbInfo->precision),
+ fp);
+ if (count > 0) {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+
+ fclose(fp);
+ return count;
+}
+
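+/* Worker thread: dump this thread's slice of normal tables into its own
+ * .sql file. */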
+static void *dumpNtbOfDb(void *arg) {
+ threadInfo *pThreadInfo = (threadInfo *)arg;
+
+ debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
+ debugPrint("dump table count = \t%"PRId64"\n",
+ pThreadInfo->count);
+
+ FILE *fp = NULL;
+ char tmpBuf[MAX_PATH_LEN] = {0};
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%d.sql",
+ g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.%d.sql",
+ pThreadInfo->dbName, pThreadInfo->threadIndex);
+ }
+
+ fp = fopen(tmpBuf, "w");
+
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
+ }
+
+ int64_t count;
+ for (int64_t i = 0; i < pThreadInfo->count; i++) {
+ debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
+ pThreadInfo->threadIndex, i,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name);
+ count = dumpNormalTable(
+ pThreadInfo->taos,
+ pThreadInfo->dbName,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name,
+ pThreadInfo->precision,
+ fp);
+ if (count < 0) {
+ break;
+ } else {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+ }
+
+ fclose(fp);
+ return NULL;
+}
+
+static int checkParam() {
+ if (g_args.all_databases && g_args.databases) {
+        errorPrint("%s", "conflicting options: --all-databases and --databases\n");
+ return -1;
+ }
+
+ if (g_args.start_time > g_args.end_time) {
+ errorPrint("%s", "start time is larger than end time\n");
+ return -1;
+ }
+
+ if (g_args.arg_list_len == 0) {
+ if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
+ errorPrint("%s", "taosdump requires parameters\n");
+ return -1;
+ }
+ }
+ /*
+ if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
+ fprintf(stderr, "duplicate parameter input and output file path\n");
+ return -1;
+ }
+ */
+ if (!g_args.isDumpIn && g_args.encode != NULL) {
+        fprintf(stderr, "invalid option: encode is only valid for dump-in\n");
+ return -1;
+ }
+
+ if (g_args.table_batch <= 0) {
+        fprintf(stderr, "invalid option: table_batch must be greater than 0\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+static bool isEmptyCommand(char *cmd) {
+ char *pchar = cmd;
+
+ while (*pchar != '\0') {
+ if (*pchar != ' ') return false;
+ pchar++;
+ }
+
+ return true;
+}
+
+static void taosReplaceCtrlChar(char *str) {
+ bool ctrlOn = false;
+ char *pstr = NULL;
+
+ for (pstr = str; *str != '\0'; ++str) {
+ if (ctrlOn) {
+ switch (*str) {
+ case 'n':
+ *pstr = '\n';
+ pstr++;
+ break;
+ case 'r':
+ *pstr = '\r';
+ pstr++;
+ break;
+ case 't':
+ *pstr = '\t';
+ pstr++;
+ break;
+ case '\\':
+ *pstr = '\\';
+ pstr++;
+ break;
+ case '\'':
+ *pstr = '\'';
+ pstr++;
+ break;
+ default:
+ break;
+ }
+ ctrlOn = false;
+ } else {
+ if (*str == '\\') {
+ ctrlOn = true;
+ } else {
+ *pstr = *str;
+ pstr++;
+ }
+ }
+ }
+
+ *pstr = '\0';
+}
+*/
+
+char *ascii_literal_list[] = {
+ "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
+ "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
+ "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
+ "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
+ "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
+ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
+ "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
+ "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
+ "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
+ "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
+ "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
+ "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
+ "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
+ "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
+ "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
+ "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
+ "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
+ "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
+ "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
+ "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
+
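+/*
+ * Escape every byte through ascii_literal_list so BINARY values survive
+ * the round-trip through a SQL text file.
+ */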
+static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
+ pstr++;
+ size--;
}
+ *pbuf = '\0';
+ return 0;
+}
- count_temp = counter;
-
- for (; counter < numOfCols; counter++) {
- if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- } else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
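+/*
+ * Decode multi-byte characters with mbtowc(): escape single-byte code
+ * points, copy wider ones through verbatim.
+ */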
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ wchar_t wc;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
+ if (byte_width < 0) {
+            errorPrint("%s() LN%d, mbtowc() failed.\n", __func__, __LINE__);
+ exit(-1);
}
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
+ if ((int)wc < 256) {
+ pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ } else {
+ memcpy(pbuf, pstr, byte_width);
+ pbuf += byte_width;
}
+ pstr += byte_width;
}
- pstr += sprintf(pstr, ");");
-
- debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
- return fprintf(fp, "%s\n\n", sqlstr);
-}
+ *pbuf = '\0';
-static int writeSchemaToAvro(char *jsonAvroSchema)
-{
- errorPrint("%s() LN%d, TODO: implement write schema to avro",
- __func__, __LINE__);
return 0;
}
-static int64_t writeResultToAvro(TAOS_RES *res)
-{
- errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__);
- return 0;
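+/* Record the client charset as a "#!<charset>" header at the start of the
+ * dump file, so dump-in can restore it via loadFileCharset(). */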
+static void dumpCharset(FILE *fp) {
+ char charsetline[256];
+
+ (void)fseek(fp, 0, SEEK_SET);
+ sprintf(charsetline, "#!%s\n", tsCharset);
+ (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}
-static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
-{
- int64_t totalRows = 0;
+static void loadFileCharset(FILE *fp, char *fcharset) {
+ char * line = NULL;
+ size_t line_size = 0;
- int32_t sql_buf_len = g_args.max_sql_len;
- char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
- if (tmpBuffer == NULL) {
- errorPrint("failed to allocate %d memory\n", sql_buf_len + 128);
- return -1;
+ (void)fseek(fp, 0, SEEK_SET);
+ ssize_t size = getline(&line, &line_size, fp);
+ if (size <= 2) {
+ goto _exit_no_charset;
}
- char *pstr = tmpBuffer;
-
- TAOS_ROW row = NULL;
- int numFields = 0;
- int rowFlag = 0;
- int64_t lastRowsPrint = 5000000;
- int count = 0;
+ if (strncmp(line, "#!", 2) != 0) {
+ goto _exit_no_charset;
+ }
+ if (line[size - 1] == '\n') {
+ line[size - 1] = '\0';
+ size--;
+ }
+ strcpy(fcharset, line + 2);
- numFields = taos_field_count(res);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ tfree(line);
+ return;
- int32_t curr_sqlstr_len = 0;
- int32_t total_sqlstr_len = 0;
+_exit_no_charset:
+ (void)fseek(fp, 0, SEEK_SET);
+ *fcharset = '\0';
+ tfree(line);
+ return;
+}
- while ((row = taos_fetch_row(res)) != NULL) {
- curr_sqlstr_len = 0;
+// ======== dumpIn support multi threads functions ================================//
- int32_t* length = taos_fetch_lengths(res); // act len
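+/*
+ * Replay a .sql dump file line by line; a trailing backslash continues
+ * the statement onto the next line.
+ */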
+static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset,
+ char* encode, char* fileName) {
+ int read_len = 0;
+ char * cmd = NULL;
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
- if (count == 0) {
- total_sqlstr_len = 0;
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "INSERT INTO %s.%s VALUES (", dbName, tbName);
- } else {
- if (g_args.mysqlFlag) {
- if (0 == rowFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- rowFlag++;
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
- }
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- }
- }
+ cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
+ if (cmd == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- for (int col = 0; col < numFields; col++) {
- if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+ int lastRowsPrint = 5000000;
+ int lineNo = 0;
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ ++lineNo;
+ if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
+ line[--read_len] = '\0';
- if (row[col] == NULL) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
- continue;
- }
+ //if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ if (read_len == 0 ) {
+ continue;
+ }
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_INT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
- *((int64_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- {
- char tbuf[COMMAND_SIZE] = {0};
- converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_NCHAR:
- {
- char tbuf[COMMAND_SIZE] = {0};
- convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!g_args.mysqlFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
- *(int64_t *)row[col]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[col]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
- buf, (int)(ts % 1000));
- }
- break;
- default:
- break;
- }
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
}
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd[read_len + cmd_len]= '\0';
+ if (queryDbImpl(taos, cmd)) {
+ errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
+ __func__, __LINE__, lineNo, fileName);
+ fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
+ }
- totalRows++;
- count++;
- fprintf(fp, "%s", tmpBuffer);
+ memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
+ cmd_len = 0;
- if (totalRows >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
- totalRows, dbName, tbName);
+ if (lineNo >= lastRowsPrint) {
+            printf(" %d lines already executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
}
+ }
- total_sqlstr_len += curr_sqlstr_len;
+ tfree(cmd);
+ tfree(line);
+ return 0;
+}
- if ((count >= g_args.data_batch)
- || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
- fprintf(fp, ";\n");
- count = 0;
+static void* dumpInSqlWorkThreadFp(void *arg)
+{
+ threadInfo *pThread = (threadInfo*)arg;
+ setThreadName("dumpInSqlWorkThrd");
+ fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
+ pThread->threadIndex, pThread->count, pThread->from);
+
+ for (int64_t i = 0; i < pThread->count; i++) {
+ char sqlFile[MAX_PATH_LEN];
+ sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);
+
+ FILE* fp = openDumpInFile(sqlFile);
+ if (NULL == fp) {
+ errorPrint("[%d] Failed to open input file: %s\n",
+ pThread->threadIndex, sqlFile);
+ continue;
}
+
+ if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
+ sqlFile)) {
+            okPrint("[%d] Successfully dumped in file: %s\n",
+ pThread->threadIndex, sqlFile);
+ }
+ fclose(fp);
}
- debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
+ return NULL;
+}
- fprintf(fp, "\n");
- atomic_add_fetch_64(&g_totalDumpOutRows, totalRows);
- free(tmpBuffer);
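+/*
+ * Like the Avro dump-in, but every worker owns its own TAOS connection
+ * because the SQL statements are executed concurrently.
+ */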
+static int dumpInSqlWorkThreads()
+{
+ int32_t threads = g_args.thread_num;
- return 0;
-}
+ uint64_t sqlFileCount = getFilesNum("sql");
+ if (0 == sqlFileCount) {
+ debugPrint("No .sql file found in %s\n", g_args.inpath);
+ return 0;
+ }
-static int64_t dumpTableData(FILE *fp, char *tbName,
- char* dbName, int precision,
- char *jsonAvroSchema) {
- int64_t totalRows = 0;
+ createDumpinList("sql", sqlFileCount);
+
+ threadInfo *pThread;
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = (threadInfo *)calloc(
+ threads, sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
+
+ int64_t a = sqlFileCount / threads;
+ if (a < 1) {
+ threads = sqlFileCount;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = sqlFileCount % threads;
+ }
- char sqlstr[1024] = {0};
+ int64_t from = 0;
- int64_t start_time, end_time;
- if (strlen(g_args.humanStartTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
+ for (int32_t t = 0; t < threads; ++t) {
+ pThread = infos + t;
+ pThread->threadIndex = t;
+
+ pThread->from = from;
+        pThread->count = (t < b) ? a + 1 : a;
+        from += pThread->count;
+ verbosePrint(
+ "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n",
+ t, pThread->count, pThread->from);
+
+ pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ free(infos);
+ free(pids);
return -1;
}
- } else {
- start_time = g_args.start_time;
- }
- if (strlen(g_args.humanEndTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
- return -1;
+ if (pthread_create(pids + t, NULL,
+ dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread[%d] failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(EXIT_FAILURE);
}
- } else {
- end_time = g_args.end_time;
}
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbName, start_time, end_time);
-
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbName, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbName);
- return -1;
+ for (int t = 0; t < threads; ++t) {
+ pthread_join(pids[t], NULL);
}
- TAOS_RES* res = taos_query(taos, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("failed to run command %s, reason: %s\n",
- sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
+ for (int t = 0; t < threads; ++t) {
+ taos_close(infos[t].taos);
}
+ free(infos);
+ free(pids);
- if (g_args.avro) {
- writeSchemaToAvro(jsonAvroSchema);
- totalRows = writeResultToAvro(res);
- } else {
- totalRows = writeResultToSql(res, fp, dbName, tbName);
- }
+ freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
- taos_free_result(res);
- taos_close(taos);
- return totalRows;
+ return 0;
}
-static int checkParam() {
- if (g_args.all_databases && g_args.databases) {
- errorPrint("%s", "conflict option --all-databases and --databases\n");
- return -1;
- }
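+/*
+ * Replay dbs.sql first: it holds the database and schema statements that
+ * the per-table dump files depend on.
+ */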
+static int dumpInDbs()
+{
+ TAOS *taos = taos_connect(
+ g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
- if (g_args.start_time > g_args.end_time) {
- errorPrint("%s", "start time is larger than end time\n");
+ if (taos == NULL) {
+ errorPrint("%s() LN%d, failed to connect to TDengine server\n",
+ __func__, __LINE__);
return -1;
}
- if (g_args.arg_list_len == 0) {
- if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
- errorPrint("%s", "taosdump requires parameters\n");
- return -1;
- }
- }
- /*
- if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
- fprintf(stderr, "duplicate parameter input and output file path\n");
- return -1;
- }
- */
- if (!g_args.isDumpIn && g_args.encode != NULL) {
- fprintf(stderr, "invalid option in dump out\n");
+ char dbsSql[MAX_PATH_LEN];
+ sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");
+
+ FILE *fp = openDumpInFile(dbsSql);
+ if (NULL == fp) {
+ errorPrint("%s() LN%d, failed to open input file %s\n",
+ __func__, __LINE__, dbsSql);
return -1;
}
+    debugPrint("Successfully opened input file: %s\n", dbsSql);
+ loadFileCharset(fp, g_tsCharset);
- if (g_args.table_batch <= 0) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
+ if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
+        okPrint("Successfully dumped in file: %s!\n", dbsSql);
}
+ fclose(fp);
+ taos_close(taos);
+
return 0;
}
-/*
-static bool isEmptyCommand(char *cmd) {
- char *pchar = cmd;
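+/* Dump-in order: dbs.sql, then the .sql data files, then .avro files. */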
+static int64_t dumpIn() {
+ assert(g_args.isDumpIn);
- while (*pchar != '\0') {
- if (*pchar != ' ') return false;
- pchar++;
- }
+ int64_t ret = 0;
+ if (dumpInDbs()) {
+ errorPrint("%s", "Failed to dump dbs in!\n");
+ exit(EXIT_FAILURE);
+ }
- return true;
+ ret = dumpInSqlWorkThreads();
+
+#ifdef AVRO_SUPPORT
+ if (0 == ret) {
+ ret = dumpInAvroWorkThreads();
+ }
+#endif
+
+ return ret;
}
-static void taosReplaceCtrlChar(char *str) {
- bool ctrlOn = false;
- char *pstr = NULL;
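+/*
+ * Worker thread: page through a super table's child tables with
+ * LIMIT/OFFSET so each thread dumps a disjoint range.
+ */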
+static void *dumpNormalTablesOfStb(void *arg) {
+ threadInfo *pThreadInfo = (threadInfo *)arg;
- for (pstr = str; *str != '\0'; ++str) {
- if (ctrlOn) {
- switch (*str) {
- case 'n':
- *pstr = '\n';
- pstr++;
- break;
- case 'r':
- *pstr = '\r';
- pstr++;
- break;
- case 't':
- *pstr = '\t';
- pstr++;
- break;
- case '\\':
- *pstr = '\\';
- pstr++;
- break;
- case '\'':
- *pstr = '\'';
- pstr++;
- break;
- default:
- break;
- }
- ctrlOn = false;
- } else {
- if (*str == '\\') {
- ctrlOn = true;
- } else {
- *pstr = *str;
- pstr++;
- }
+ debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
+ debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count);
+
+ char command[COMMAND_SIZE];
+
+ sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
+ pThreadInfo->dbName, pThreadInfo->stbName,
+ pThreadInfo->count, pThreadInfo->from);
+
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ int32_t code = taos_errno(res);
+ if (code) {
+ errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return NULL;
}
- }
- *pstr = '\0';
-}
-*/
+ FILE *fp = NULL;
+ char tmpBuf[MAX_PATH_LEN] = {0};
-char *ascii_literal_list[] = {
- "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
- "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
- "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
- "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
- "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
- "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
- "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
- "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
- "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
- "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
- "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
- "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
- "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
- "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
- "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
- "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
- "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
- "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
- "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
- "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.%d.sql",
+ g_args.outpath,
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ pThreadInfo->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.%s.%d.sql",
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ pThreadInfo->threadIndex);
+ }
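+    // One output file per worker: <db>.<stable>.<threadIndex>.sql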
-static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- while (size > 0) {
- if (*pstr == '\0') break;
- pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
- pstr++;
- size--;
+ fp = fopen(tmpBuf, "w");
+
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
}
- *pbuf = '\0';
- return 0;
-}
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- wchar_t wc;
- while (size > 0) {
- if (*pstr == '\0') break;
- int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
- if (byte_width < 0) {
- errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
- exit(-1);
- }
+ TAOS_ROW row = NULL;
+ int64_t i = 0;
+ int64_t count;
+ while((row = taos_fetch_row(res)) != NULL) {
+ debugPrint("[%d] sub table %"PRId64": name: %s\n",
+ pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- if ((int)wc < 256) {
- pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ count = dumpNormalTable(
+ pThreadInfo->taos,
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ pThreadInfo->precision,
+ fp);
+ if (count < 0) {
+ break;
} else {
- memcpy(pbuf, pstr, byte_width);
- pbuf += byte_width;
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
- pstr += byte_width;
}
- *pbuf = '\0';
+ fclose(fp);
+ return NULL;
+}
+
+static int64_t dumpNtbOfDbByThreads(
+ SDbInfo *dbInfo,
+ int64_t ntbCount)
+{
+ if (ntbCount <= 0) {
+ return 0;
+ }
+
+ int threads = g_args.thread_num;
- return 0;
-}
+ int64_t a = ntbCount / threads;
+ if (a < 1) {
+ threads = ntbCount;
+ a = 1;
+ }
-static void dumpCharset(FILE *fp) {
- char charsetline[256];
+ assert(threads);
+ int64_t b = ntbCount % threads;
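+    // Split ntbCount across threads: a tables each, the first b threads take one extra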
- (void)fseek(fp, 0, SEEK_SET);
- sprintf(charsetline, "#!%s\n", tsCharset);
- (void)fwrite(charsetline, strlen(charsetline), 1, fp);
-}
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ assert(pids);
+ assert(infos);
-static void loadFileCharset(FILE *fp, char *fcharset) {
- char * line = NULL;
- size_t line_size = 0;
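+    // Every worker gets its own connection to the target database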
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->taos = taos_connect(
+ g_args.host,
+ g_args.user,
+ g_args.password,
+ dbInfo->name,
+ g_args.port
+ );
+ if (NULL == pThreadInfo->taos) {
+ errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+ __func__,
+ __LINE__,
+ taos_errstr(NULL));
+ free(pids);
+ free(infos);
- (void)fseek(fp, 0, SEEK_SET);
- ssize_t size = getline(&line, &line_size, fp);
- if (size <= 2) {
- goto _exit_no_charset;
+ return -1;
+ }
+
+ pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i<b)?a+1:a;
+        pThreadInfo->from = (i==0)?0:
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+ strcpy(pThreadInfo->dbName, dbInfo->name);
+ pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
+
+ pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
}
- if (strncmp(line, "#!", 2) != 0) {
- goto _exit_no_charset;
+ for (int64_t i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
}
- if (line[size - 1] == '\n') {
- line[size - 1] = '\0';
- size--;
+
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ taos_close(pThreadInfo->taos);
}
- strcpy(fcharset, line + 2);
- tfree(line);
- return;
+ free(pids);
+ free(infos);
-_exit_no_charset:
- (void)fseek(fp, 0, SEEK_SET);
- *fcharset = '\0';
- tfree(line);
- return;
+ return 0;
}
-// ======== dumpIn support multi threads functions ================================//
-
-static char **g_tsDumpInSqlFiles = NULL;
-static int32_t g_tsSqlFileNum = 0;
-static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
-static char g_tsCharset[64] = {0};
-
-static int taosGetFilesNum(const char *directoryName,
- const char *prefix, const char *prefix2)
+static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
{
- char cmd[1024] = { 0 };
+ TAOS *taos = taos_connect(g_args.host,
+ g_args.user, g_args.password, dbInfo->name, g_args.port);
+ if (NULL == taos) {
+ errorPrint(
+ "Failed to connect to TDengine server %s by specified database %s\n",
+ g_args.host, dbInfo->name);
+ return 0;
+ }
- if (prefix2)
- sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ",
- directoryName, prefix, directoryName, prefix2);
- else
- sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
+ char command[COMMAND_SIZE];
+ TAOS_RES *result;
+ int32_t code;
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
+ sprintf(command, "USE %s", dbInfo->name);
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("invalid database %s, reason: %s\n",
+ dbInfo->name, taos_errstr(result));
+ taos_close(taos);
+ return 0;
}
- int fileNum = 0;
- if (fscanf(fp, "%d", &fileNum) != 1) {
- errorPrint("failed to execute:%s, parse result error\n", cmd);
- exit(-1);
+ sprintf(command, "SHOW TABLES");
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("Failed to show %s\'s tables, reason: %s\n",
+ dbInfo->name, taos_errstr(result));
+ taos_close(taos);
+ return 0;
}
- if (fileNum <= 0) {
- errorPrint("directory:%s is empty\n", directoryName);
- exit(-1);
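+    // Cache all table names (and their super table, if any) before fanning out to threads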
+ g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
+ assert(g_tablesList);
+
+ TAOS_ROW row;
+ int64_t count = 0;
+ while(NULL != (row = taos_fetch_row(result))) {
+ debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
+ __func__, __LINE__,
+ count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ tstrncpy(((TableInfo *)(g_tablesList + count))->name,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
+ char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
+ if (stbName) {
+ tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
+ (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
+ ((TableInfo *)(g_tablesList + count))->belongStb = true;
+ }
+        count++;
}
+ taos_close(taos);
+
+ int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
+
+ free(g_tablesList);
+ g_tablesList = NULL;
- pclose(fp);
- return fileNum;
+ return records;
}
-static void taosParseDirectory(const char *directoryName,
- const char *prefix, const char *prefix2,
- char **fileArray, int totalFiles)
+static int64_t dumpNtbOfStbByThreads(
+ SDbInfo *dbInfo, char *stbName)
{
- char cmd[1024] = { 0 };
+ int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
- if (prefix2) {
- sprintf(cmd, "ls %s/*.%s %s/*.%s | sort",
- directoryName, prefix, directoryName, prefix2);
- } else {
- sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ if (ntbCount <= 0) {
+ return 0;
}
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
- }
+ int threads = g_args.thread_num;
- int fileNum = 0;
- while (fscanf(fp, "%128s", fileArray[fileNum++])) {
- if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
- fileNum--;
- }
- if (fileNum >= totalFiles) {
- break;
- }
+ int64_t a = ntbCount / threads;
+ if (a < 1) {
+ threads = ntbCount;
+ a = 1;
}
- if (fileNum != totalFiles) {
- errorPrint("directory:%s changed while read\n", directoryName);
- pclose(fp);
- exit(-1);
- }
+ assert(threads);
+ int64_t b = ntbCount % threads;
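+    // Same split as in dumpNtbOfDbByThreads: a tables per thread, remainder b spread over the first threads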
- pclose(fp);
-}
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
-static void taosCheckDatabasesSQLFile(const char *directoryName)
-{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/dbs.sql", directoryName);
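+    // Fan out over the super table's sub-tables, one name range per worker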
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->taos = taos_connect(
+ g_args.host,
+ g_args.user,
+ g_args.password,
+ dbInfo->name,
+ g_args.port
+ );
+ if (NULL == pThreadInfo->taos) {
+ errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+ __func__,
+ __LINE__,
+ taos_errstr(NULL));
+ free(pids);
+ free(infos);
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
- }
+ return -1;
+ }
- while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
- break;
- }
+ pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i<b)?a+1:a;
+        pThreadInfo->from = (i==0)?0:
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+ strcpy(pThreadInfo->dbName, dbInfo->name);
+ pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
- pclose(fp);
-}
+ strcpy(pThreadInfo->stbName, stbName);
+ pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
+ }
-static void taosMallocDumpFiles()
-{
- g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
- for (int i = 0; i < g_tsSqlFileNum; i++) {
- g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ for (int64_t i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
}
-}
-static void freeDumpFiles()
-{
- for (int i = 0; i < g_tsSqlFileNum; i++) {
- tfree(g_tsDumpInSqlFiles[i]);
+ int64_t records = 0;
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ records += pThreadInfo->rowsOfDumpOut;
+ taos_close(pThreadInfo->taos);
}
- tfree(g_tsDumpInSqlFiles);
+
+ free(pids);
+ free(infos);
+
+ return records;
}
-static void taosGetDirectoryFileList(char *inputDir)
+static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
{
- struct stat fileStat;
- if (stat(inputDir, &fileStat) < 0) {
- errorPrint("%s not exist\n", inputDir);
- exit(-1);
- }
+ dumpCreateDbClause(dbInfo, g_args.with_property, fp);
- if (fileStat.st_mode & S_IFDIR) {
- taosCheckDatabasesSQLFile(inputDir);
- if (g_args.avro)
- g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
- else
- g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ dbInfo->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
- int tsSqlFileNumOfTbls = g_tsSqlFileNum;
- if (g_tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
- }
- taosMallocDumpFiles();
- if (0 != tsSqlFileNumOfTbls) {
- if (g_args.avro) {
- taosParseDirectory(inputDir, "sql", "avro",
- g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
- } else {
- taosParseDirectory(inputDir, "sql", NULL,
- g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
- }
- }
- fprintf(stdout, "\nstart to dispose %d files in %s\n",
- g_tsSqlFileNum, inputDir);
- } else {
- errorPrint("%s is not a directory\n", inputDir);
- exit(-1);
- }
-}
+ dumpCreateSTableClauseOfDb(dbInfo, fp);
-static FILE* taosOpenDumpInFile(char *fptr) {
- wordexp_t full_path;
+ return dumpNTablesOfDb(dbInfo);
+}
- if (wordexp(fptr, &full_path, 0) != 0) {
- errorPrint("illegal file name: %s\n", fptr);
- return NULL;
- }
+static int dumpOut() {
+ TAOS *taos = NULL;
+ TAOS_RES *result = NULL;
- char *fname = full_path.we_wordv[0];
+ TAOS_ROW row;
+ FILE *fp = NULL;
+ int32_t count = 0;
- FILE *f = NULL;
- if ((fname) && (strlen(fname) > 0)) {
- f = fopen(fname, "r");
- if (f == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, fname);
- }
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
+ } else {
+ sprintf(tmpBuf, "dbs.sql");
}
- wordfree(&full_path);
- return f;
-}
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
-static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
- char* encode, char* fileName) {
- int read_len = 0;
- char * cmd = NULL;
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
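+    // Count the databases to dump up front so g_dbInfos can be allocated exactly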
+ g_args.dumpDbCount = getDumpDbCount();
+ debugPrint("%s() LN%d, dump db count: %d\n",
+ __func__, __LINE__, g_args.dumpDbCount);
- cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
- if (cmd == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
- __func__, __LINE__);
+ if (0 == g_args.dumpDbCount) {
+ errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
+ fclose(fp);
return -1;
}
- int lastRowsPrint = 5000000;
- int lineNo = 0;
- while ((read_len = getline(&line, &line_len, fp)) != -1) {
- ++lineNo;
- if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
- line[--read_len] = '\0';
+ g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
+ if (g_dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ goto _exit_failure;
+ }
- //if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
- continue;
- }
+ char command[COMMAND_SIZE];
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
+ /* Connect to server */
+ taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ goto _exit_failure;
+ }
- memcpy(cmd + cmd_len, line, read_len);
- cmd[read_len + cmd_len]= '\0';
- if (queryDbImpl(taos, cmd)) {
- errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
- __func__, __LINE__, lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
- }
+ /* --------------------------------- Main Code -------------------------------- */
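+    // Record the charset at the top of dbs.sql so a later dump-in can restore text encoding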
+ dumpCharset(fp);
- memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
+ sprintf(command, "show databases");
+ result = taos_query(taos, command);
+ int32_t code = taos_errno(result);
- if (lineNo >= lastRowsPrint) {
- printf(" %d lines already be executed from file %s\n", lineNo, fileName);
- lastRowsPrint += 5000000;
- }
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(result));
+ goto _exit_failure;
}
- tfree(cmd);
- tfree(line);
- fclose(fp);
- return 0;
-}
+ TAOS_FIELD *fields = taos_fetch_fields(result);
-static void* dumpInWorkThreadFp(void *arg)
-{
- threadInfo *pThread = (threadInfo*)arg;
- setThreadName("dumpInWorkThrd");
+ while ((row = taos_fetch_row(result)) != NULL) {
+        // skip the sys database 'log' unless g_args.allow_sys is set
+ if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ && (!g_args.allow_sys)) {
+ continue;
+ }
- for (int32_t f = 0; f < g_tsSqlFileNum; ++f) {
- if (f % pThread->totalThreads == pThread->threadIndex) {
- char *SQLFileName = g_tsDumpInSqlFiles[f];
- FILE* fp = taosOpenDumpInFile(SQLFileName);
- if (NULL == fp) {
+ if (g_args.databases) { // input multi dbs
+ if (inDatabasesSeq(
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
continue;
}
- fprintf(stderr, ", Success Open input file: %s\n",
- SQLFileName);
- dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName);
+ } else if (!g_args.all_databases) { // only input one db
+ if (strncasecmp(g_args.arg_list[0],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
+ continue;
}
- }
- return NULL;
-}
-
-static void startDumpInWorkThreads()
-{
- pthread_attr_t thattr;
- threadInfo *pThread;
- int32_t totalThreads = g_args.thread_num;
-
- if (totalThreads > g_tsSqlFileNum) {
- totalThreads = g_tsSqlFileNum;
- }
+ g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (g_dbInfos[count] == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
+ goto _exit_failure;
+ }
- threadInfo *threadObj = (threadInfo *)calloc(
- totalThreads, sizeof(threadInfo));
+ okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
+ tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ min(TSDB_DB_NAME_LEN,
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
+ if (g_args.with_property) {
+ g_dbInfos[count]->ntables =
+ *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ g_dbInfos[count]->vgroups =
+ *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ g_dbInfos[count]->replica =
+ *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ g_dbInfos[count]->quorum =
+ *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ g_dbInfos[count]->days =
+ *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- if (NULL == threadObj) {
- errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
- }
+ tstrncpy(g_dbInfos[count]->keeplist,
+ (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
+ //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
+ //g_dbInfos[count]->daysToKeep1;
+ //g_dbInfos[count]->daysToKeep2;
+ g_dbInfos[count]->cache =
+ *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ g_dbInfos[count]->blocks =
+ *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ g_dbInfos[count]->minrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ g_dbInfos[count]->maxrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ g_dbInfos[count]->wallevel =
+ *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ g_dbInfos[count]->fsync =
+ *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ g_dbInfos[count]->comp =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ g_dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- for (int32_t t = 0; t < totalThreads; ++t) {
- pThread = threadObj + t;
- pThread->threadIndex = t;
- pThread->totalThreads = totalThreads;
- pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (pThread->taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- free(threadObj);
- return;
+ tstrncpy(g_dbInfos[count]->precision,
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ DB_PRECISION_LEN);
+ g_dbInfos[count]->update =
+ *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ count++;
- if (pthread_create(&(pThread->threadID), &thattr,
- dumpInWorkThreadFp, (void*)pThread) != 0) {
- errorPrint("%s() LN%d, thread:%d failed to start\n",
- __func__, __LINE__, pThread->threadIndex);
- exit(0);
+ if (g_args.databases) {
+ if (count > g_args.dumpDbCount)
+ break;
+ } else if (!g_args.all_databases) {
+ if (count >= 1)
+ break;
}
}
- for (int t = 0; t < totalThreads; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
-
- for (int t = 0; t < totalThreads; ++t) {
- taos_close(threadObj[t].taos);
+ if (count == 0) {
+ errorPrint("%d databases valid to dump\n", count);
+ goto _exit_failure;
}
- free(threadObj);
-}
-
-static int dumpIn() {
- assert(g_args.isDumpIn);
- TAOS *taos = NULL;
- FILE *fp = NULL;
+ if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
+ for (int i = 0; i < count; i++) {
+ int64_t records = 0;
+ records = dumpWholeDatabase(g_dbInfos[i], fp);
+ if (records >= 0) {
+ okPrint("Database %s dumped\n", g_dbInfos[i]->name);
+ g_totalDumpOutRows += records;
+ }
+ }
+ } else {
+ if (1 == g_args.arg_list_len) {
+ int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
+ if (records >= 0) {
+ okPrint("Database %s dumped\n", g_dbInfos[0]->name);
+ g_totalDumpOutRows += records;
+ }
+ } else {
+ dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ }
- taos = taos_connect(
- g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (taos == NULL) {
- errorPrint("%s() LN%d, failed to connect to TDengine server\n",
- __func__, __LINE__);
- return -1;
- }
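+        // Remaining command-line args name individual tables (normal or super) in this database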
+        int superTblCnt = 0;
+ for (int i = 1; g_args.arg_list[i]; i++) {
+ TableRecordInfo tableRecordInfo;
- taosGetDirectoryFileList(g_args.inpath);
+ if (getTableRecordInfo(g_dbInfos[0]->name,
+ g_args.arg_list[i],
+ &tableRecordInfo) < 0) {
+ errorPrint("input the invalid table %s\n",
+ g_args.arg_list[i]);
+ continue;
+ }
- int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
- if (g_tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ int64_t records = 0;
+ if (tableRecordInfo.isStb) { // dump all table of this stable
+ int ret = dumpStableClasuse(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ fp);
+ if (ret >= 0) {
+ superTblCnt++;
+ records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
+ }
+ } else if (tableRecordInfo.belongStb){
+ dumpStableClasuse(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ fp);
+ records = dumpNormalTableBelongStb(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ g_args.arg_list[i]);
+ } else {
+ records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
+ }
- fp = taosOpenDumpInFile(g_tsDbSqlFile);
- if (NULL == fp) {
- errorPrint("%s() LN%d, failed to open input file %s\n",
- __func__, __LINE__, g_tsDbSqlFile);
- return -1;
+ if (records >= 0) {
+ okPrint("table: %s dumped\n", g_args.arg_list[i]);
+ g_totalDumpOutRows += records;
+ }
}
- fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
-
- loadFileCharset(fp, g_tsCharset);
-
- dumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
- g_tsDbSqlFile);
}
taos_close(taos);
- if (0 != tsSqlFileNumOfTbls) {
- startDumpInWorkThreads();
- }
-
- freeDumpFiles();
+ /* Close the handle and return */
+ fclose(fp);
+ taos_free_result(result);
+ freeDbInfos();
+ fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
return 0;
+
+_exit_failure:
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ freeDbInfos();
+ errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return -1;
}
int main(int argc, char *argv[]) {
@@ -2988,7 +3859,10 @@ int main(int argc, char *argv[]) {
printf("databasesSeq: %s\n", g_args.databasesSeq);
printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
printf("with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
printf("avro format: %s\n", g_args.avro?"true":"false");
+ printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time);
@@ -3042,7 +3916,10 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
+ fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
@@ -3072,6 +3949,7 @@ int main(int argc, char *argv[]) {
tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (dumpIn() < 0) {
+ errorPrint("%s\n", "dumpIn() failed!");
ret = -1;
}
} else {
@@ -3103,4 +3981,3 @@ int main(int argc, char *argv[]) {
return ret;
}
-
diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt
index 4cf444bab2f05816c1af55d96156334800d758d5..0d085fc6f03cac8b2a76132008848a0c1542b325 100644
--- a/src/plugins/CMakeLists.txt
+++ b/src/plugins/CMakeLists.txt
@@ -1,26 +1,6 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
-if(NOT WIN32)
- string(ASCII 27 Esc)
- set(ColourReset "${Esc}[m")
- set(ColourBold "${Esc}[1m")
- set(Red "${Esc}[31m")
- set(Green "${Esc}[32m")
- set(Yellow "${Esc}[33m")
- set(Blue "${Esc}[34m")
- set(Magenta "${Esc}[35m")
- set(Cyan "${Esc}[36m")
- set(White "${Esc}[37m")
- set(BoldRed "${Esc}[1;31m")
- set(BoldGreen "${Esc}[1;32m")
- set(BoldYellow "${Esc}[1;33m")
- set(BoldBlue "${Esc}[1;34m")
- set(BoldMagenta "${Esc}[1;35m")
- set(BoldCyan "${Esc}[1;36m")
- set(BoldWhite "${Esc}[1;37m")
-endif()
-
ADD_SUBDIRECTORY(monitor)
IF (TD_BUILD_HTTP)
@@ -57,6 +37,8 @@ ELSE ()
DEPENDS taos
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config"
+ PATCH_COMMAND
+ COMMAND git clean -f -d
BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}"
INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/
)
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index ccbcc985118b132369a1ee3895f4341e6cca6d59..f26a4b4c8bdda05f801075b70c1b762882adfd27 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) {
TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext;
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext,
- sizeof(TSDB_CACHE_PTR_TYPE), 3000);
+ sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive);
pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index 258a29b90b40f4a5a630c17328a927923e1f1be6..6b1bccf6e54685c2c5d2106bace50643b9c8028f 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -20,7 +20,8 @@
extern "C" {
#endif
-#define TSDB_CFG_MAX_NUM 128
+
+#define TSDB_CFG_MAX_NUM 129
#define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41
diff --git a/tests/pytest/insert/special_character_show.py b/tests/pytest/insert/special_character_show.py
index 3b2df5c87380c22fb18cbee06c866249b4365a70..ce9f1de76aa5896beb3aa78dce8a3a65a81a973c 100644
--- a/tests/pytest/insert/special_character_show.py
+++ b/tests/pytest/insert/special_character_show.py
@@ -31,9 +31,8 @@ class TDTestCase:
tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
- tdLog.info('=============== step2,create table增加了转义字符')
+ tdLog.info('=============== step2,create table with escape character')
tdLog.info('create table tb1 using stb1 tags("abc\\"def")')
- #增加了转义字符\
tdSql.execute('create table tb1 using stb1 tags("abc\\"def")')
tdLog.info('=============== step3,insert data')