diff --git a/.gitmodules b/.gitmodules index dbb02d4ef7ed65d11418e271cac7e61b95c2a482..4b0b8dcab54c3dcd0bdbd75a4f4a2871ce3218a7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -19,3 +19,6 @@ [submodule "src/plugins/blm3"] path = src/plugins/blm3 url = https://github.com/taosdata/blm3 +[submodule "deps/avro"] + path = deps/avro + url = https://github.com/apache/avro diff --git a/CMakeLists.txt b/CMakeLists.txt index 75f98f96bcb26ae12fd32b56f2533db3001c6ae5..547455d07b6ba25ac58ae5e4851c5cd5b08e3c60 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,6 +15,26 @@ ELSE () CMAKE_MINIMUM_REQUIRED(VERSION 3.0) ENDIF () +if(NOT WIN32) + string(ASCII 27 Esc) + set(ColourReset "${Esc}[m") + set(ColourBold "${Esc}[1m") + set(Red "${Esc}[31m") + set(Green "${Esc}[32m") + set(Yellow "${Esc}[33m") + set(Blue "${Esc}[34m") + set(Magenta "${Esc}[35m") + set(Cyan "${Esc}[36m") + set(White "${Esc}[37m") + set(BoldRed "${Esc}[1;31m") + set(BoldGreen "${Esc}[1;32m") + set(BoldYellow "${Esc}[1;33m") + set(BoldBlue "${Esc}[1;34m") + set(BoldMagenta "${Esc}[1;35m") + set(BoldCyan "${Esc}[1;36m") + set(BoldWhite "${Esc}[1;37m") +endif() + SET(TD_ACCOUNT FALSE) SET(TD_ADMIN FALSE) SET(TD_GRANT FALSE) diff --git a/Jenkinsfile b/Jenkinsfile index f0f3e0d122ad470cce0ef9586e01fe9431ccfa8d..ecf708037f1243f553ca3fbd79c10d58fbb0d496 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,7 +107,77 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python/ || echo "not install" + pip3 install ${WKC}/src/connector/python/ + ''' + return 1 +} +def pre_test_noinstall(){ + sh'hostname' + sh''' + cd ${WKC} + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WKC} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WKC} + git checkout develop + ''' + } + } + sh''' + cd ${WKC} + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + git clean -dfx + git submodule update --init --recursive + cd ${WK} + git reset --hard HEAD~10 + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WK} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WK} + git checkout develop + ''' + } + } + sh ''' + cd ${WK} + git pull >/dev/null + + export TZ=Asia/Harbin + date + git clean -dfx + mkdir debug + cd debug + cmake .. 
> /dev/null + make ''' return 1 } @@ -460,31 +530,55 @@ pipeline { stage('arm64centos7') { agent{label " arm64centos7 "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64centos8') { agent{label " arm64centos8 "} steps { - pre_test() + pre_test_noinstall() } } stage('arm32bionic') { agent{label " arm32bionic "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64bionic') { agent{label " arm64bionic "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64focal') { agent{label " arm64focal "} steps { - pre_test() + pre_test_noinstall() + } + } + stage('centos7') { + agent{label " centos7 "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:trusty') { + agent{label " trusty "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:xenial') { + agent{label " xenial "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:bionic') { + agent{label " bionic "} + steps { + pre_test_noinstall() } } diff --git a/cmake/define.inc b/cmake/define.inc index bb6b285f268a6476c79fb599e76b1fd0435173b5..8d1d3f306febf481140f270f55ef2cd45de01db9 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -128,7 +128,6 @@ IF (TD_APLHINE) MESSAGE(STATUS "aplhine is defined") ENDIF () -MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP}) IF ("${BUILD_HTTP}" STREQUAL "") IF (TD_LINUX) IF (TD_ARM_32) @@ -140,16 +139,27 @@ IF ("${BUILD_HTTP}" STREQUAL "") SET(BUILD_HTTP "true") ENDIF () ENDIF () -MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP}) IF (${BUILD_HTTP} MATCHES "true") SET(TD_BUILD_HTTP TRUE) +ELSEIF (${BUILD_HTTP} MATCHES "false") + SET(TD_BUILD_HTTP FALSE) ENDIF () IF (TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF () +IF ("${AVRO_SUPPORT}" MATCHES "true") + SET(TD_AVRO_SUPPORT TRUE) +ELSEIF ("${AVRO_SUPPORT}" MATCHES "false") + SET(TD_AVRO_SUPPORT FALSE) +ENDIF () + +IF (TD_AVRO_SUPPORT) + ADD_DEFINITIONS(-DAVRO_SUPPORT) +ENDIF () + IF (TD_LINUX) ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_LINUX) diff --git a/cmake/input.inc b/cmake/input.inc index 5bd1a7bed6fe9b0c7dc51c46870d8109462eae81..d18aa56ce1c684cd54286421c975ddf485129cb5 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -92,6 +92,8 @@ ENDIF () SET(TD_BUILD_HTTP FALSE) +SET(TD_AVRO_SUPPORT FALSE) + SET(TD_MEMORY_SANITIZER FALSE) IF (${MEMORY_SANITIZER} MATCHES "true") SET(TD_MEMORY_SANITIZER TRUE) diff --git a/cmake/install.inc b/cmake/install.inc index 9ecd9bcd4fa722dd039170ef30220679cedf65b1..c90aa3f9511e416106309e603853028e7096f082 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -35,7 +35,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
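The `pre_test_noinstall()` function added above checks out the prospective merge commit of the PR and compiles it without running `make install`. Below is a standalone sketch of that checkout-and-build cycle; `WORKSPACE`, `TARGET_BRANCH`, and `CHANGE_ID` are placeholders standing in for the Jenkins environment (`${WKC}`, `env.CHANGE_TARGET`, `${CHANGE_ID}`).

```bash
#!/bin/bash
# Sketch of the checkout/build sequence the new pre_test_noinstall() stages run.
# WORKSPACE, TARGET_BRANCH and CHANGE_ID stand in for the Jenkins environment.
set -e
WORKSPACE=${WORKSPACE:?path to the TDengine checkout}
TARGET_BRANCH=${TARGET_BRANCH:-develop}
CHANGE_ID=${CHANGE_ID:?pull request number}

cd "$WORKSPACE"
git reset --hard HEAD~10 >/dev/null      # drop commits left by earlier runs
git checkout "$TARGET_BRANCH"
git pull >/dev/null
# GitHub publishes a synthetic merge of the PR against its target branch;
# building that commit tests the post-merge state, not just the PR head.
git fetch origin "+refs/pull/${CHANGE_ID}/merge"
git checkout -qf FETCH_HEAD
git clean -dfx                           # remove untracked build leftovers
git submodule update --init --recursive

mkdir -p debug && cd debug
cmake .. >/dev/null
make                                     # compile only; no make install
```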
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.36-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index 1d3b25e9237ef507811fa234dda4211acd6eb885..2405f84104ebd7597d2e509034847eb78d31aabc 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.3.0.0") + SET(TD_VER_NUMBER "2.3.1.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 45828245e2d541114a2ae0a287e0c6acbd0d42be..773a791a2527712270f569d5c04aa7f8ef066e40 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) ENDIF () +IF (TD_AVRO_SUPPORT) + MESSAGE("") + MESSAGE("${Green} ENABLE avro format support ${ColourReset}") + MESSAGE("") + include(ExternalProject) + ExternalProject_Add( + apache-avro + PREFIX "avro" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c + BUILD_IN_SOURCE 1 + PATCH_COMMAND + COMMAND git clean -f -d + COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build + ) +ELSE () + MESSAGE("") + MESSAGE("${Yellow} NO avro format support ${ColourReset}") + MESSAGE("") +ENDIF () + IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + MESSAGE("") + MESSAGE("${Green} ENABLE jemalloc ${ColourReset}") + MESSAGE("") MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) - include(ExternalProject) ExternalProject_Add(jemalloc PREFIX "jemalloc" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc @@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED) ENDIF () IF (${TSZ_ENABLED} MATCHES "true") - ADD_SUBDIRECTORY(TSZ) -ENDIF() \ No newline at end of file + ADD_SUBDIRECTORY(TSZ) +ENDIF() diff --git a/deps/TSZ b/deps/TSZ deleted file mode 160000 index 0ca5b15a8eac40327dd737be52c926fa5675712c..0000000000000000000000000000000000000000 --- a/deps/TSZ +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c diff --git a/deps/avro b/deps/avro new file mode 160000 index 0000000000000000000000000000000000000000..a1fce29d9675b4dd95dfee9db32cc505d0b2227c --- /dev/null +++ b/deps/avro @@ -0,0 +1 @@ +Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md index 799cfc14a300d3f4c9fcbf8537f04984ae8e1df4..bc3259365d0b658184318e994ffd31a9e4ffee90 100644 --- a/documentation20/cn/09.connections/docs.md +++ b/documentation20/cn/09.connections/docs.md @@ -3,7 +3,7 @@ ## Grafana -TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。 +TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 
中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。关于TDengine插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。

### 安装Grafana

@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/

### 配置Grafana

-TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplugin 目录下。
-
-以 CentOS 7.2 操作系统为例,将 grafanaplugin 目录拷贝到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
+TDengine 的 Grafana 插件请从 [GitHub](https://github.com/taosdata/grafanaplugin/releases) 下载。

```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```

-Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
+
+```bash
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
+```
+
+Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+
+```ini
 [plugins]
-enable_alpha = true
-allow_loading_unsigned_plugins = taosdata-tdengine-datasource
+allow_loading_unsigned_plugins = tdengine-datasource
```

### 使用 Grafana

@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource

* ALIAS BY:可设置当前查询别名。
* GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。
-
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:

![img](../images/connections/create_dashboard2.jpg)

@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource

#### 导入 Dashboard

-在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
+我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。

-点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件:
+点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,将 id `15146` 填入并加载:

![img](../images/connections/import_dashboard1.jpg)

导入完成之后可看到如下效果:

-![img](../images/connections/import_dashboard2.jpg)
-
+![img](../images/connections/dashboard-15146.png)

## MATLAB

diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md
index 4bdcd52d62f8c3a95bc91261b77242e5263a8f23..04765602dab18fbacf7d92d44ca324db660c0ac4 100644
--- a/documentation20/cn/14.devops/01.telegraf/docs.md
+++ b/documentation20/cn/14.devops/01.telegraf/docs.md
@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如

## 数据链路设置

-### 复制 TDengine 插件到 grafana 插件目录
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+### 下载 TDengine 插件到 grafana 插件目录
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```

### 修改 /etc/telegraf/telegraf.conf

@@ -61,7 +63,7 @@ sudo systemctl start telegraf

使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。
点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。
-点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件再导入。之后可以看到如下界面的仪表盘:
+点击左侧加号图标并选择 Import,从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:

![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)

diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md
index 2a031d63e55ed7888332757170b781beae787ff7..a35772bb498d426a1f44a9e7eb0bea61b51f92a5 100644
--- a/documentation20/cn/14.devops/02.collectd/docs.md
+++ b/documentation20/cn/14.devops/02.collectd/docs.md
@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如

## 数据链路设置

### 复制 TDengine 插件到 grafana 插件目录
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```

### 配置 collectd

@@ -62,13 +64,13 @@ repeater 部分添加 { host:'<host>', port: <port> }。

diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
--- a/documentation20/en/09.connections/docs.md
+++ b/documentation20/en/09.connections/docs.md
+Please download TDengine's Grafana plugin from GitHub.
+
+```bash
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+```

-Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana.
+Taking CentOS 7.2 as an example, just unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.

```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```

### Use Grafana

@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp

#### Import Dashboard

-A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`.
+We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).

-Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
+Click the `Import` button on the left panel and load dashboard id `15146`:

![img](../images/connections/import_dashboard1.jpg)

You can see the following after the Dashboard is imported.
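The CN and EN passages above describe the same plugin installation; collected into one runnable script for reference (paths assume a stock Grafana package install on systemd, and 3.1.1 is the release version the docs name):

```bash
#!/bin/bash
# Install the tdengine-datasource Grafana plugin from a GitHub release, as
# the updated docs describe. Assumes a default Grafana package install.
set -e
GF_VERSION=3.1.1
wget "https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip"
sudo unzip "tdengine-datasource-$GF_VERSION.zip" -d /var/lib/grafana/plugins/
sudo chown -R grafana:grafana /var/lib/grafana/plugins/  # grafana must own the plugin files
# Grafana 7.3+/8.x reject unsigned plugins unless explicitly allowed:
echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" \
  | sudo tee -a /etc/grafana/grafana.ini
sudo systemctl restart grafana-server.service
```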
-![img](../images/connections/import_dashboard2.jpg) +![img](../images/connections/dashboard-15146.png) ## MATLAB diff --git a/documentation20/en/images/connections/dashboard-15146.png b/documentation20/en/images/connections/dashboard-15146.png new file mode 100644 index 0000000000000000000000000000000000000000..3eb240ad8ad648953e32f27e674e2a9171ed9af8 Binary files /dev/null and b/documentation20/en/images/connections/dashboard-15146.png differ diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 48f0bee6b34496603d67f74938857d7bb94627f2..e42212ff0f55420dfa5f23638a69439be795e43a 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -203,6 +203,9 @@ keepColumnName 1 # database name must be specified in restful interface if the following parameter is set, off by default # httpDbNameMandatory 1 +# http keep alive, default is 30 seconds +# httpKeepAlive 30000 + # The following parameter is used to limit the maximum number of lines in log files. # max number of lines per log filters # numOfLogLines 10000000 diff --git a/packaging/check_package.sh b/packaging/check_package.sh index edc98da65e5574b91efbce16f4df0fd042b18c13..9728f9b964732195970708fbf9fb61361768143b 100755 --- a/packaging/check_package.sh +++ b/packaging/check_package.sh @@ -128,12 +128,12 @@ function check_link() { function check_main_path() { #check install main dir and all sub dir main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d") - for i in ${main_dir[@]};do + for i in "${main_dir[@]}";do check_file ${install_main_dir} $i done if [ "$verMode" == "cluster" ]; then nginx_main_dir=("admin" "conf" "html" "sbin" "logs") - for i in ${nginx_main_dir[@]};do + for i in "${nginx_main_dir[@]}";do check_file ${nginx_dir} $i done fi @@ -143,11 +143,11 @@ function check_main_path() { function check_bin_path() { # check install bin dir and all sub dir bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") - for i in ${bin_dir[@]};do + for i in "${bin_dir[@]}";do check_file ${sbin_dir} $i done lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") - for i in ${lbin_dir[@]};do + for i in "${lbin_dir[@]}";do check_link ${bin_link_dir}/$i done if [ "$verMode" == "cluster" ]; then @@ -171,7 +171,7 @@ function check_lib_path() { function check_header_path() { # check all header header_dir=("taos.h" "taoserror.h") - for i in ${header_dir[@]};do + for i in "${header_dir[@]}";do check_link ${inc_link_dir}/$i done echo -e "Check bin path:\033[32mOK\033[0m!" @@ -179,7 +179,8 @@ function check_header_path() { function check_blm3_config_dir() { # check all config - check_file ${cfg_install_dir} blm3.toml + check_file ${cfg_install_dir} blm.toml + check_file ${cfg_install_dir} blm3.service check_file ${install_main_dir}/cfg blm.toml.org echo -e "Check conf path:\033[32mOK\033[0m!" } diff --git a/packaging/deb/DEBIAN/control b/packaging/deb/DEBIAN/control index c01640d7e9adb4f7f8d6eb29f06008480dc8eee4..fd3f81ba082d11f6ff3979382a63597b5806fa1f 100644 --- a/packaging/deb/DEBIAN/control +++ b/packaging/deb/DEBIAN/control @@ -11,4 +11,3 @@ Maintainer: support@taosdata.com Provides: taosdata Homepage: http://taosdata.com Description: Big Data Platform Designed and Optimized for IoT. 
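The `check_package.sh` hunks above quote every array expansion they iterate over. A self-contained demonstration of why (the element values here are illustrative only):

```bash
#!/bin/bash
# Unquoted ${arr[@]} re-splits elements on whitespace; quoted "${arr[@]}"
# yields exactly one word per element, which is what the checks rely on.
arr=("taos.cfg" "blm3 service file" "driver")

echo "unquoted:"
for i in ${arr[@]}; do echo "  <$i>"; done    # 5 words: the middle element splits

echo "quoted:"
for i in "${arr[@]}"; do echo "  <$i>"; done  # 3 words, one per element
```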
- diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst index 55218b471669887bd0d4066bb9ef91bf1f195031..aaa052639ba5a95884accdf9c09a9351a0400cc5 100644 --- a/packaging/deb/DEBIAN/preinst +++ b/packaging/deb/DEBIAN/preinst @@ -32,5 +32,9 @@ if [ -f "${install_main_dir}/blm.toml" ]; then ${csudo} rm -f ${install_main_dir}/cfg/blm.toml || : fi +if [ -f "${install_main_dir}/blm3.service" ]; then + ${csudo} rm -f ${install_main_dir}/cfg/blm3.service || : +fi + # there can not libtaos.so*, otherwise ln -s error ${csudo} rm -f ${install_main_dir}/driver/libtaos* || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 2c18cec497c0a741c96f13afb06794e26e8eaf1c..cefdcdb1f1aab081ac286ecf199539abd7fcfa3b 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -47,6 +47,9 @@ cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_pat if [ -f "${compile_dir}/test/cfg/blm.toml" ]; then cp ${compile_dir}/test/cfg/blm.toml ${pkg_dir}${install_home_path}/cfg fi +if [ -f "${compile_dir}/test/cfg/blm3.service" ]; then + cp ${compile_dir}/test/cfg/blm3.service ${pkg_dir}${install_home_path}/cfg ||: +fi cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script @@ -68,19 +71,24 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples -if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then - cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin -else - echo "grafanaplugin bundled directory not found!" 
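The avro staging this `makedeb.sh` change performs (continued just below) follows the standard soname-link convention. A sketch of the same pattern against a scratch tree; the `23.0.0` version string comes from the hunk itself, and the scratch directory is hypothetical:

```bash
#!/bin/bash
# Stage a versioned shared library plus its soname links into a package tree,
# mirroring what makedeb.sh does for libavro. The scratch dir is illustrative.
set -e
stage=$(mktemp -d)/usr/local/lib
mkdir -p "$stage"
echo 'stand-in for the built libavro' > "$stage/libavro.so.23.0.0"
ln -sf libavro.so.23.0.0 "$stage/libavro.so.23"   # soname link: runtime lookup
ln -sf libavro.so.23     "$stage/libavro.so"      # dev link: what -lavro resolves
ls -l "$stage"
```

Relative link targets keep the chain intact after the tree is repacked under the final install root.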
- exit 1 -fi cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: +install_user_local_path="/usr/local" + +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + mkdir -p ${pkg_dir}${install_user_local_path}/lib + cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/ + ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23 + ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so +fi +if [ -f ${compile_dir}/build/lib/libavro.a ]; then + cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/ +fi + if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then - install_user_local_path="/usr/local" mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then @@ -120,6 +128,10 @@ chmod 755 ${pkg_dir}/DEBIAN/* debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control +fi + #get taos version, then set deb name @@ -151,4 +163,3 @@ cp ${pkg_dir}/*.deb ${output_dir} # clean tmep dir rm -rf ${pkg_dir} - diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index e4d3cda7f29fea96cabfe48f5b10ab668a085ea8..9f60b840d68577b751314e7ddecc9da98c20f8d6 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -45,24 +45,32 @@ echo "version=${version}" #docker manifest rm tdengine/tdengine:${version} if [ "$verType" == "beta" ]; then docker manifest inspect tdengine/tdengine-beta:latest + docker manifest inspect tdengine/tdengine-beta:${version} + docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest + sleep 30 + docker manifest rm tdengine/tdengine-beta:${version} docker manifest rm tdengine/tdengine-beta:latest docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password - docker manifest push tdengine/tdengine-beta:latest docker manifest push tdengine/tdengine-beta:${version} - + docker manifest push tdengine/tdengine-beta:latest elif [ "$verType" == "stable" ]; then docker manifest inspect tdengine/tdengine:latest + docker manifest inspect tdengine/tdengine:${version} + docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker 
manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
+  sleep 30
  docker manifest rm tdengine/tdengine:latest
+  docker manifest rm tdengine/tdengine:${version}
+  docker manifest inspect tdengine/tdengine:latest
+  docker manifest inspect tdengine/tdengine:${version}
  docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
  docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
  docker login -u tdengine -p ${passWord}  #replace the docker registry username and password
-  docker manifest push tdengine/tdengine:latest
  docker manifest push tdengine/tdengine:${version}
-
+  docker manifest push tdengine/tdengine:latest
else
  echo "unknow verType, nor stabel or beta"
  exit 1
diff --git a/packaging/release.sh b/packaging/release.sh
index 705103a87a35a73b2a91079707785279416644cd..b9fe25ec08e8dcd1170867fa20f4a4fe5a1ef2d1 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -151,7 +151,7 @@ function vercomp () {
}

# 1. check version information
-if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
+if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then
  echo "please enter correct version"
  exit 0
fi
@@ -213,7 +213,7 @@ else
  exit 1
fi

-make -j8
+make -j8 && ${csudo} make install

cd ${curr_dir}
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index 4cc7daf1a4cd15d06db084faf23dd4fcb15a955d..42ceeb791b6154f7d22a477bf3b3c3b8c726869c 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -32,20 +32,20 @@ if command -v sudo > /dev/null; then
fi

function cp_rpm_package() {
-local cur_dir
-cd $1
-cur_dir=$(pwd)
-
-for dirlist in $(ls ${cur_dir}); do
-  if test -d ${dirlist}; then
-    cd ${dirlist}
-    cp_rpm_package ${cur_dir}/${dirlist}
-    cd ..
-  fi
-  if test -e ${dirlist}; then
-    cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
-  fi
-done
+  local cur_dir
+  cd $1
+  cur_dir=$(pwd)
+
+  for dirlist in $(ls ${cur_dir}); do
+    if test -d ${dirlist}; then
+      cd ${dirlist}
+      cp_rpm_package ${cur_dir}/${dirlist}
+      cd ..
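The reworked `dockerManifest.sh` above deals with stale local manifest lists by creating, removing, and recreating them before pushing. A condensed sketch of that refresh cycle (tags match the script; it assumes `docker login` has already succeeded):

```bash
#!/bin/bash
# Refresh and push a multi-arch manifest list the way dockerManifest.sh now
# does: create so a list exists locally, wait, remove the cached list,
# recreate it from the per-arch tags, then push the version tag first.
version=${version:?release version, e.g. 2.3.1.0}
repo=tdengine/tdengine

docker manifest inspect "$repo:$version" || true   # log the current state
docker manifest create -a "$repo:$version" \
  "${repo}-amd64:$version" "${repo}-aarch64:$version" "${repo}-aarch32:$version"
sleep 30                                           # let the registry settle
docker manifest rm "$repo:$version"                # drop the locally cached list
docker manifest create -a "$repo:$version" \
  "${repo}-amd64:$version" "${repo}-aarch64:$version" "${repo}-aarch32:$version"
docker manifest push "$repo:$version"
```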
+ fi + if test -e ${dirlist}; then + cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm + fi + done } if [ -d ${pkg_dir} ]; then @@ -56,6 +56,10 @@ cd ${pkg_dir} ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file} +fi + ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} # copy rpm package to output_dir, and modify package name, then clean temp dir diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 19fe23d194be2266bcb68034e3c4fd90d9824f3d..85aa7e072476b089352d3e5da4d2abc801d8e24b 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -57,6 +57,9 @@ cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg if [ -f %{_compiledir}/test/cfg/blm.toml ]; then cp %{_compiledir}/test/cfg/blm.toml %{buildroot}%{homepath}/cfg fi +if [ -f %{_compiledir}/test/cfg/blm3.service ]; then + cp %{_compiledir}/test/cfg/blm3.service %{buildroot}%{homepath}/cfg +fi cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script @@ -73,18 +76,20 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include -if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then - cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin -else - echo grafanaplugin bundled directory not found! - exit 1 -fi cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples +if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then + cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver + ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23 + ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so +fi +if [ -f %{_compiledir}/build/lib/libavro.a ]; then + cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver +fi if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then mkdir -p %{buildroot}%{userlocalpath}/bin @@ -151,16 +156,21 @@ if pidof taosd &> /dev/null; then echo "Stop taosd service success!" 
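Both packagers now declare the avro runtime dependencies only when the library was actually built. The detection step, isolated into a sketch (`compile_dir` and `spec_file` are placeholders for the script variables):

```bash
#!/bin/bash
# Patch the RPM spec's Requires line only if avro support was compiled in,
# as the makerpm.sh hunk above does.
compile_dir=${compile_dir:-debug}
spec_file=${spec_file:-tdengine.spec}

if [ -f "${compile_dir}/build/lib/libavro.so.23.0.0" ]; then
  # jansson and snappy are the runtime libraries libavro links against
  sed -i.bak 's/#Requires:/Requires: jansson snappy/g' "${spec_file}"
fi
```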
sleep 1
fi

-# if taos.cfg already softlink, remove it
+# if taos.cfg already exist, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
  ${csudo} rm -f %{homepath}/cfg/taos.cfg || :
fi

-# if blm.toml already softlink, remove it
+# if blm.toml already exist, remove it
if [ -f %{cfg_install_dir}/blm.toml ]; then
  ${csudo} rm -f %{homepath}/cfg/blm.toml || :
fi

+# if blm3.service already exist, remove it
+if [ -f %{cfg_install_dir}/blm3.service ]; then
+  ${csudo} rm -f %{homepath}/cfg/blm3.service || :
+fi
+
# there can not libtaos.so*, otherwise ln -s error
${csudo} rm -f %{homepath}/driver/libtaos* || :
diff --git a/packaging/tools/check_os.sh b/packaging/tools/check_os.sh
index 92522f7b82e166c1d6ec365619869ad68969155c..cc8c6e0e9366232deb9013db62b29afebd179135 100755
--- a/packaging/tools/check_os.sh
+++ b/packaging/tools/check_os.sh
@@ -1,4 +1,4 @@
-# /bin/bash
+#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 2d3ed2e0f8f97c4604471659415a691d1b704a60..33097bd5411f0fc0239b7e571a69de4a6f8408fc 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() {
  iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
  arr=($iphost)
  IFS="$OLD_IFS"
-  for s in ${arr[@]}
+  for s in "${arr[@]}"
  do
    if [[ "$s" == "$localIp" ]]; then
      return
@@ -358,7 +358,7 @@ function is_correct_ipaddr() {
  IFS=" "
  arr=($iplist)
  IFS="$OLD_IFS"
-  for s in ${arr[@]}
+  for s in "${arr[@]}"
  do
    if [[ "$s" == "$newIp" ]]; then
      return 0
@@ -679,8 +679,8 @@ function install_service_on_systemd() {
    taosd_service_config="${service_config_dir}/taosd.service"
    ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
    ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
-    ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
-    ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}"
+    ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}"
+    ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}"
    ${csudo} bash -c "echo >> ${taosd_service_config}"
    ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
    ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
@@ -756,6 +756,11 @@ function install_service_on_systemd() {
  fi
}

+function install_blm3_service() {
+  [ -f ${script_dir}/cfg/blm3.service ] &&\
+    ${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/
+}
+
function install_service() {
  if ((${service_mod}==0)); then
    install_service_on_systemd
@@ -878,6 +883,7 @@ function update_TDengine() {
  if [ -z $1 ]; then
    install_bin
    install_service
+    install_blm3_service
    install_config
    install_blm3_config
@@ -959,6 +965,7 @@ function install_TDengine() {
  # For installing new
  install_bin
  install_service
+  install_blm3_service

  openresty_work=false
  if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 05eb09d8f3a8b5237c36714e964530b877e332de..0e0ee7ba31f4715b2c5585dd040727d604aa90b1 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
  iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
  arr=($iphost)
  IFS="$OLD_IFS"
-  for s in ${arr[@]}
+  for s in "${arr[@]}"
  do
    if [[ "$s" == "$localIp" ]]; then
      return
@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
  IFS=" 
" arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh index 527f9a231e5a97fa086ef655cd420abc61677fcf..e5675b858066148df07508ad2438b0f00d7ce7bf 100755 --- a/packaging/tools/install_pro.sh +++ b/packaging/tools/install_pro.sh @@ -278,7 +278,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -305,7 +305,7 @@ function set_hostname() { echo "set hostname fail!" return fi - + #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then ${csudo} echo $newHostname > /etc/hostname ||: @@ -330,7 +330,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh index 52e08cb6b0d00b25686b87e2f066401e0388d4ce..ef5fb8c05a4a98a55918ee217125bd0f0a09b955 100755 --- a/packaging/tools/install_tq.sh +++ b/packaging/tools/install_tq.sh @@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -342,7 +342,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 7fbdbab1c798af572fc67cf79f27812ea64d3bae..c29c1cd665a11596b83234d1b0343bbab1cf5dc1 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -212,7 +212,8 @@ function install_jemalloc() { fi if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then /usr/bin/install -c -d /usr/local/include/jemalloc - /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\ + /usr/local/include/jemalloc fi if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then /usr/bin/install -c -d /usr/local/lib @@ -225,23 +226,47 @@ function install_jemalloc() { /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then /usr/bin/install -c -d /usr/local/lib/pkgconfig - /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\ + /usr/local/lib/pkgconfig + fi + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
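`make_install.sh` now registers /usr/local/lib with the dynamic linker rather than assuming it is searched by default. The pattern on its own, following the scripts' `csudo` convention (the conf file name here is illustrative):

```bash
#!/bin/bash
# Register a non-default library directory with the dynamic linker, the way
# make_install.sh now does for jemalloc and avro. libdir is a placeholder.
libdir=/usr/local/lib
csudo=""; command -v sudo >/dev/null && csudo="sudo"

if [ -d /etc/ld.so.conf.d ]; then
  echo "$libdir" | ${csudo} tee /etc/ld.so.conf.d/local-libs.conf
  ${csudo} ldconfig                         # rebuild the linker cache
  ldconfig -p | grep "$libdir" | head -n 3  # spot-check the cache picked it up
else
  echo "/etc/ld.so.conf.d not found!"
fi
```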
fi fi if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then /usr/bin/install -c -d /usr/local/share/doc/jemalloc - /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\ + /usr/local/share/doc/jemalloc fi if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then /usr/bin/install -c -d /usr/local/share/man/man3 - /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\ + /usr/local/share/man/man3 fi - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf - ${csudo} ldconfig - else - echo "/etc/ld.so.conf.d not found!" + fi +} + +function install_avro() { + if [ "$osType" != "Darwin" ]; then + if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then + /usr/bin/install -c -d /usr/local/$1 + /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1 + ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ln -sf libavro.so.23 /usr/local/$1/libavro.so + /usr/bin/install -c -d /usr/local/$1 + [ -f ${binary_dir}/build/$1/libavro.a ] && + /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" + fi fi fi } @@ -292,6 +317,8 @@ function install_lib() { fi install_jemalloc + install_avro lib + install_avro lib64 if [ "$osType" != "Darwin" ]; then ${csudo} ldconfig @@ -381,11 +408,6 @@ function install_data() { } function install_connector() { - if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then - ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" 
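After `install_avro()` runs, all three library names should resolve to the same file. A quick verification sketch, assuming the default /usr/local/lib target used above:

```bash
#!/bin/bash
# Verify the symlink chain install_avro() creates:
#   libavro.so -> libavro.so.23 -> libavro.so.23.0.0
libdir=${1:-/usr/local/lib}
for name in libavro.so libavro.so.23 libavro.so.23.0.0; do
  printf '%-20s -> %s\n' "$name" "$(readlink -f "$libdir/$name")"
done
# all three lines should end in .../libavro.so.23.0.0
```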
- fi if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector else @@ -481,8 +503,8 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" @@ -503,6 +525,11 @@ function install_service_on_systemd() { ${csudo} systemctl enable taosd } +function install_blm3_service() { + [ -f ${script_dir}/cfg/blm3.service ] &&\ + ${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/ +} + function install_service() { if ((${service_mod}==0)); then install_service_on_systemd @@ -544,6 +571,7 @@ function update_TDengine() { if [ "$osType" != "Darwin" ]; then install_service + install_blm3_service fi install_config @@ -598,6 +626,7 @@ function install_TDengine() { if [ "$osType" != "Darwin" ]; then install_service + install_blm3_service fi install_config diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d26f617e421406364ce4d34c4baf5c55b904a2b5..39a35e384fffdd4f319e72fbeb819fe08f7871b8 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index 89591cac234b190f55d144ccf98cb2d5c70a7936..19e24b3dafb7f1f95832e637e181449e4c381faf 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" 
- fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh index 599c91fbf082955887c677b750aa12f946c0890b..4a0b033d30e6478f37a62f9cc896aee0903d39c9 100755 --- a/packaging/tools/makeclient_pro.sh +++ b/packaging/tools/makeclient_pro.sh @@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh index 03d9b13059daadfdc7207c78b6f89cae321f25ac..1cc7003661a7491b1df625916dd289de32434ee9 100755 --- a/packaging/tools/makeclient_tq.sh +++ b/packaging/tools/makeclient_tq.sh @@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index f0c25208529768fb387262a668381a57e34f51ac..7071912fc8133fb2bf1b15f992ff61c514bb79a1 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index dbb7e6887fa1b0f96ea68f1c880ee77ced0858bd..0b24100c3eb6be74ee4b415759a263647a395da3 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" 
- fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh index 1668838be0522bc02ab027b6ee4ac6ff250fefa2..a69e542c3c5969d609f8d5a00b6428add15fd950 100755 --- a/packaging/tools/makepkg_pro.sh +++ b/packaging/tools/makepkg_pro.sh @@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo #if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then # cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: -# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then -# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin -# else -# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" -# fi # if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then # cp -r ${connector_dir}/go ${install_dir}/connector # else diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh index 416a3f60a4a57d6afa34d1d8f931a7efd68d6958..ccf42a8aab090b95de8e889b3a8186be9a6cba7a 100755 --- a/packaging/tools/makepkg_tq.sh +++ b/packaging/tools/makepkg_tq.sh @@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 9956455691a9d042d20082eb70cd23d99c1cca77..418b9bd0f9b5ea82f49ad5c8165f628c90f472d2 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -2,7 +2,7 @@ # # This file is used to install tdengine rpm package on centos systems. The operating system # is required to use systemd to manage services at boot -#set -x +# set -x iplist="" serverFqdn="" @@ -86,6 +86,24 @@ function install_include() { ${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h } +function install_avro_lib() { + ${csudo} rm -f ${lib_link_dir}/libavro* || : + ${csudo} rm -f ${lib64_link_dir}/libavro* || : + + if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then + ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0 + ${csudo} ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23 + ${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so + + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libavro.so ]]; then + ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || : + ${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || : + ${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || : + fi + fi + + ${csudo} ldconfig +} function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos* || : ${csudo} rm -f ${lib64_link_dir}/libtaos* || : @@ -97,6 +115,8 @@ function install_lib() { ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi + + ${csudo} ldconfig } function install_bin() { @@ -127,7 +147,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -182,7 +202,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 @@ -424,8 +444,8 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" @@ -446,6 +466,11 @@ function install_service_on_systemd() { ${csudo} systemctl enable taosd } +function install_blm3_service() { + [ -f ${script_dir}/cfg/blm3.service ] &&\ + ${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/ +} + function install_service() { if ((${service_mod}==0)); then install_service_on_systemd @@ -474,8 +499,10 @@ function install_TDengine() { # Install include, lib, binary and service install_include install_lib + install_avro_lib install_bin install_service + install_blm3_service install_config install_blm3_config diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 16a892d26c1d11cddf5dc15758e784c9ff268822..7f6ef5c27d182fcaa1f9ea80f1169b389db8b014 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -58,6 +58,12 @@ function kill_taosd() { } function clean_service_on_systemd() { + blm3_service_config="${service_config_dir}/blm3.service" + if systemctl is-active --quiet blm3; then + echo "blm3 is running, stopping it..." 
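The service hunks in this patch make taosd declare both `After=` and `Wants=` on blm3.service: starting taosd pulls blm3 up first, but a blm3 failure does not block taosd the way `Requires=` would. A sketch of the generated `[Unit]` section, written to a scratch file instead of the live systemd directory:

```bash
#!/bin/bash
# The [Unit] ordering the installers now generate for taosd. Wants= pulls
# blm3.service into the start transaction; After= orders taosd behind it.
unit=$(mktemp)
cat > "$unit" <<'EOF'
[Unit]
Description=TDengine server service
After=network-online.target blm3.service
Wants=network-online.target blm3.service
EOF
cat "$unit"
# For a real install: copy into /etc/systemd/system/ and run
#   systemctl daemon-reload && systemctl enable taosd
```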
+    ${csudo} systemctl stop blm3 &> /dev/null || echo &> /dev/null
+  fi
+
  taosd_service_config="${service_config_dir}/${taos_service_name}.service"

  if systemctl is-active --quiet ${taos_service_name}; then
@@ -67,6 +73,9 @@ function clean_service_on_systemd() {
  ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null

  ${csudo} rm -f ${taosd_service_config}
+
+  [ -f ${blm3_service_config} ] && ${csudo} rm -f ${blm3_service_config}
+
}

function clean_service_on_sysvinit() {
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index f4c3350b7861ce8c027b54641e56fa99f87afbb8..be5163c4540e04e5f381357a984175904dadccef 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -111,12 +111,14 @@ function clean_log() {

function clean_service_on_systemd() {
  taosd_service_config="${service_config_dir}/${taos_service_name}.service"
+  blm3_service_config="${service_config_dir}/blm3.service"
  if systemctl is-active --quiet ${taos_service_name}; then
    echo "TDengine taosd is running, stopping it..."
    ${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
  fi
  ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
  ${csudo} rm -f ${taosd_service_config}
+  [ -f ${blm3_service_config} ] && ${csudo} rm -f ${blm3_service_config}

  tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
  if systemctl is-active --quiet ${tarbitrator_service_name}; then
diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh
index 2f466f94f08555b5c8cf8d5b4abe459f52ece49f..731f5b396f4bed78488a659dbb2b13e832deccf6 100755
--- a/packaging/tools/startPre.sh
+++ b/packaging/tools/startPre.sh
@@ -48,4 +48,3 @@ if [ ${coreFlag} = "unlimited" ];then
  fi
fi

-/usr/bin/blm3 &
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 47af7568642d0badccda51a28c09d321cf782571..64e3af498cedd25dea90055426110522bc4a4086 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core20
-version: '2.3.0.0'
+version: '2.3.1.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
diff --git a/src/client/inc/tscParseLine.h b/src/client/inc/tscParseLine.h
index 74ba9ab3d9c5251e1cf8ab4e8549c8da0353ea49..fef55011b0faec1d15876764b3fd9808ec2b4e39 100644
--- a/src/client/inc/tscParseLine.h
+++ b/src/client/inc/tscParseLine.h
@@ -66,8 +66,7 @@ typedef struct {
  int32_t affectedRows;
} SSmlLinesInfo;

-
-void addEscapeCharToString(char *str, int32_t len);
+char* addEscapeCharToString(char *str, int32_t len);
int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info);
bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info);
bool isValidInteger(char *str);
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
index a6953add19f4d0949caa6513c8ee6e3cf2a871e3..0b4eedca69631263442ad5fff7659a922aa44603 100644
--- a/src/client/src/tscParseLineProtocol.c
+++ b/src/client/src/tscParseLineProtocol.c
@@ -631,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
    if (code != 0) {
      tscError("SML:0x%"PRIx64" reconcile point schema failed. 
can not create %s", info->id, pointSchema->sTableName); return code; - } else { - pointSchema->precision = dbSchema.precision; - destroySmlSTableSchema(&dbSchema); } - } else if (code == TSDB_CODE_SUCCESS) { + } + + if (code == TSDB_CODE_SUCCESS) { + pointSchema->precision = dbSchema.precision; size_t pointTagSize = taosArrayGetSize(pointSchema->tags); size_t pointFieldSize = taosArrayGetSize(pointSchema->fields); @@ -1177,13 +1177,14 @@ static void escapeSpecialCharacter(uint8_t field, const char **pos) { *pos = cur; } -void addEscapeCharToString(char *str, int32_t len) { +char* addEscapeCharToString(char *str, int32_t len) { if (str == NULL) { - return; + return NULL; } memmove(str + 1, str, len); str[0] = str[len + 1] = TS_ESCAPE_CHAR; str[len + 2] = '\0'; + return str; } bool isValidInteger(char *str) { @@ -1907,8 +1908,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash } //Escape special character if (*cur == '\\') { - //TODO: escape will work after column & tag - //support spcial characters escapeSpecialCharacter(2, &cur); } key[len] = *cur; @@ -1985,6 +1984,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, //Escape special character if (*cur == '\\') { escapeSpecialCharacter(isTag ? 2 : 3, &cur); + len++; } cur++; len++; @@ -2107,6 +2107,13 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, pkv = *pKVs; } + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + if (childTableNameLen != 0) { + memcpy(childTableName, tsSmlChildTableName, childTableNameLen); + addEscapeCharToString(childTableName, (int32_t)(childTableNameLen)); + } + while (*cur != '\0') { ret = parseSmlKey(pkv, &cur, pHash, info); if (ret) { @@ -2118,7 +2125,8 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, tscError("SML:0x%"PRIx64" Unable to parse value", info->id); goto error; } - if (!isField && (strcasecmp(pkv->key, "`ID`") == 0)) { + + if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) { smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1); memcpy(smlData->childTableName, pkv->value, pkv->length); strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length); diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index f6b723ef3cd554a4062035c6352ee485022340ac..e78abf0596447df0ee58db88ca87b19011293c6c 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -305,6 +305,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, *pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV)); pkv = *pKVs; + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTbName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + if (childTableNameLen != 0) { + memcpy(childTbName, tsSmlChildTableName, childTableNameLen); + addEscapeCharToString(childTbName, (int32_t)(childTableNameLen)); + } while (*cur != '\0') { ret = parseTelnetTagKey(pkv, &cur, pHash, info); if (ret) { @@ -316,7 +322,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, tscError("OTD:0x%"PRIx64" Unable to parse value", info->id); return ret; } - if ((strcasecmp(pkv->key, "`ID`") == 0)) { + if (childTableNameLen != 0 && strcasecmp(pkv->key, childTbName) == 0) { *childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1); memcpy(*childTableName, pkv->value, pkv->length); (*childTableName)[pkv->length] = '\0'; @@ -892,26 
+898,33 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, if (tags == NULL || tags->type != cJSON_Object) { return TSDB_CODE_TSC_INVALID_JSON; } - //only pick up the first ID value as child table name - cJSON *id = cJSON_GetObjectItem(tags, "ID"); - if (id != NULL) { - if (!cJSON_IsString(id)) { - tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id); - return TSDB_CODE_TSC_INVALID_JSON; - } - size_t idLen = strlen(id->valuestring); - *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); - memcpy(*childTableName, id->valuestring, idLen); - strntolower_s(*childTableName, *childTableName, (int32_t)idLen); - addEscapeCharToString(*childTableName, (int32_t)idLen); - - //check duplicate IDs - cJSON_DeleteItemFromObject(tags, "ID"); - id = cJSON_GetObjectItem(tags, "ID"); + + //handle child table name + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTbName[TSDB_TABLE_NAME_LEN] = {0}; + if (childTableNameLen != 0) { + memcpy(childTbName, tsSmlChildTableName, childTableNameLen); + cJSON *id = cJSON_GetObjectItem(tags, childTbName); if (id != NULL) { - return TSDB_CODE_TSC_DUP_TAG_NAMES; + if (!cJSON_IsString(id)) { + tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id); + return TSDB_CODE_TSC_INVALID_JSON; + } + size_t idLen = strlen(id->valuestring); + *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); + memcpy(*childTableName, id->valuestring, idLen); + strntolower_s(*childTableName, *childTableName, (int32_t)idLen); + addEscapeCharToString(*childTableName, (int32_t)idLen); + + //check duplicate IDs + cJSON_DeleteItemFromObject(tags, childTbName); + id = cJSON_GetObjectItem(tags, childTbName); + if (id != NULL) { + return TSDB_CODE_TSC_DUP_TAG_NAMES; + } } } + int32_t tagNum = cJSON_GetArraySize(tags); //at least one tag pair required if (tagNum <= 0) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index a5f40aea165e861250f4c7f2d940b4e8d65db449..ea9c340ff0a9b40b2eb486c97ec32f247af3bc57 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2495,6 +2495,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg12 = "parameter is out of range [1, 100]"; const char* msg13 = "parameter list required"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; + const char* msg15 = "parameter is out of range [1, 1000]"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2942,11 +2943,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { + if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); - int64_t numRowsSelected = GET_INT32_VAL(val); + int64_t numRowsSelected = GET_INT64_VAL(val); if (numRowsSelected <= 0 || numRowsSelected > 1000) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15); } // todo REFACTOR diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 799bacda2ba9a3b52a99859edb5968d8602b4c33..bd201d980017522d0e32f6124290305d5b136f8d 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime; // common extern int tsRpcTimer; extern int tsRpcMaxTime; -extern int tsRpcForceTcp; 
// all commands go to tcp protocol if this is enabled +extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled extern int32_t tsMaxConnections; extern int32_t tsMaxShellConns; extern int32_t tsShellActivityTimer; @@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores; extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; -extern char tsCharset[]; // default encode string +extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; extern int32_t tsCompressColData; extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; -//query buffer management -extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing -extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing -extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked +// query buffer management +extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing +extern int64_t + tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing +extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked -extern int8_t tsKeepOriginalColumnName; +extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; @@ -108,7 +109,7 @@ extern int32_t tsQuorum; extern int8_t tsUpdate; extern int8_t tsCacheLastRow; -//tsdb +// tsdb extern bool tsdbForceKeepFile; extern bool tsdbForceCompactFile; extern int32_t tsdbWalFlushSize; @@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress; extern int8_t tsHttpEnableRecordSql; extern int8_t tsTelegrafUseFieldNum; extern int8_t tsHttpDbNameMandatory; +extern int32_t tsHttpKeepAlive; // mqtt extern int8_t tsEnableMqttModule; @@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3]; extern int32_t tsTopicBianryLen; // system info -extern char tsOsName[]; -extern int64_t tsPageSize; -extern int64_t tsOpenMax; -extern int64_t tsStreamMax; -extern int32_t tsNumOfCores; -extern float tsTotalLogDirGB; -extern float tsTotalTmpDirGB; -extern float tsTotalDataDirGB; -extern float tsAvailLogDirGB; -extern float tsAvailTmpDirectorySpace; -extern float tsAvailDataDirGB; -extern float tsUsedDataDirGB; -extern float tsMinimalLogDirGB; -extern float tsReservedTmpDirectorySpace; -extern float tsMinimalDataDirGB; -extern int32_t tsTotalMemoryMB; +extern char tsOsName[]; +extern int64_t tsPageSize; +extern int64_t tsOpenMax; +extern int64_t tsStreamMax; +extern int32_t tsNumOfCores; +extern float tsTotalLogDirGB; +extern float tsTotalTmpDirGB; +extern float tsTotalDataDirGB; +extern float tsAvailLogDirGB; +extern float tsAvailTmpDirectorySpace; +extern float tsAvailDataDirGB; +extern float tsUsedDataDirGB; +extern float tsMinimalLogDirGB; +extern float tsReservedTmpDirectorySpace; +extern float tsMinimalDataDirGB; +extern int32_t tsTotalMemoryMB; extern uint32_t tsVersion; // build info @@ -196,43 +198,44 @@ extern char gitinfoOfInternal[]; extern char buildinfo[]; // log -extern int8_t tsAsyncLog; -extern int32_t tsNumOfLogLines; -extern int32_t tsLogKeepDays; -extern int32_t dDebugFlag; -extern int32_t vDebugFlag; -extern int32_t mDebugFlag; +extern int8_t tsAsyncLog; +extern int32_t tsNumOfLogLines; +extern int32_t tsLogKeepDays; +extern int32_t dDebugFlag; +extern int32_t vDebugFlag; +extern int32_t mDebugFlag; extern uint32_t cDebugFlag; 
-extern int32_t jniDebugFlag; -extern int32_t tmrDebugFlag; -extern int32_t sdbDebugFlag; -extern int32_t httpDebugFlag; -extern int32_t mqttDebugFlag; -extern int32_t monDebugFlag; -extern int32_t uDebugFlag; -extern int32_t rpcDebugFlag; -extern int32_t odbcDebugFlag; +extern int32_t jniDebugFlag; +extern int32_t tmrDebugFlag; +extern int32_t sdbDebugFlag; +extern int32_t httpDebugFlag; +extern int32_t mqttDebugFlag; +extern int32_t monDebugFlag; +extern int32_t uDebugFlag; +extern int32_t rpcDebugFlag; +extern int32_t odbcDebugFlag; extern uint32_t qDebugFlag; -extern int32_t wDebugFlag; -extern int32_t cqDebugFlag; -extern int32_t debugFlag; +extern int32_t wDebugFlag; +extern int32_t cqDebugFlag; +extern int32_t debugFlag; extern int8_t tsClientMerge; #ifdef TD_TSZ // lossy -extern char lossyColumns[]; -extern double fPrecision; -extern double dPrecision; +extern char lossyColumns[]; +extern double fPrecision; +extern double dPrecision; extern uint32_t maxRange; extern uint32_t curRange; -extern char Compressor[]; +extern char Compressor[]; #endif // long query extern int8_t tsDeadLockKillQuery; // schemaless extern char tsDefaultJSONStrType[]; +extern char tsSmlChildTableName[]; typedef struct { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ebfd5e18756298c18d1d2060bed30b2aee00d1b0..c1a254b4ebd5fdfe1d29e02ab7cacbe3195058f1 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -14,18 +14,18 @@ */ #define _DEFAULT_SOURCE +#include "tglobal.h" +#include "monitor.h" #include "os.h" #include "taosdef.h" #include "taoserror.h" -#include "tulog.h" +#include "tcompare.h" #include "tconfig.h" -#include "tglobal.h" -#include "monitor.h" -#include "tsocket.h" -#include "tutil.h" #include "tlocale.h" +#include "tsocket.h" #include "ttimezone.h" -#include "tcompare.h" +#include "tulog.h" +#include "tutil.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -49,16 +49,16 @@ int32_t tsDnodeId = 0; int64_t tsDnodeStartTime = 0; // common -int32_t tsRpcTimer = 300; -int32_t tsRpcMaxTime = 600; // seconds; -int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default -int32_t tsMaxShellConns = 50000; +int32_t tsRpcTimer = 300; +int32_t tsRpcMaxTime = 600; // seconds; +int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default +int32_t tsMaxShellConns = 50000; int32_t tsMaxConnections = 5000; -int32_t tsShellActivityTimer = 3; // second +int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; int32_t tsNumOfCommitThreads = 4; float tsRatioOfQueryCores = 1.0f; -int8_t tsDaylight = 0; +int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string @@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN; -int8_t tsTscEnableRecordSql = 0; +int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from // one virtual node, to order according to timestamp @@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000; int32_t tsMinSlidingTime = 10; // the maxinum number of distict query result -int32_t tsMaxNumOfDistinctResults = 1000 * 10000; +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; 
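Aside: the `+extern char tsSmlChildTableName[];` declaration above backs the new `smlChildTableName` config option registered later in this diff. It lets the schemaless JSON parser (see parseTagsFromJSON earlier) take the child table name from a user-chosen tag key instead of the hard-coded `ID` tag. Below is a minimal, hedged sketch of how a client could exercise this through the Python connector's schemaless API; it assumes a reachable TDengine 2.x server and a client-side taos.cfg that sets `smlChildTableName` to the illustrative key `tname`.

```python
import taos
from taos import SmlProtocol, SmlPrecision

# Assumption: the client-side taos.cfg contains the line
#   smlChildTableName tname
# so parseTagsFromJSON looks up the "tname" tag instead of "ID".
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
conn.execute("create database if not exists sml_demo")
conn.select_db("sml_demo")

# The "tname" tag value ("d1001") should become the child table name and is
# then dropped from the tag set, mirroring the old hard-coded ID handling.
lines = [
    '{"metric": "meters", "timestamp": 1626006833639, "value": 10.3,'
    ' "tags": {"tname": "d1001", "location": "beijing", "groupid": 2}}'
]
conn.schemaless_insert(lines, SmlProtocol.JSON_PROTOCOL, SmlPrecision.NOT_CONFIGURED)
conn.close()
```

If `smlChildTableName` is left empty, the default noted on the new global in tglobal.c applies: the system generates the child table name itself using an MD5 hash.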
@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000; int32_t tsStreamCompStartDelay = 10000; // the stream computing delay time after executing failed, change accordingly -int32_t tsRetryStreamCompDelay = 10*1000; +int32_t tsRetryStreamCompDelay = 10 * 1000; // The delayed computing ration. 10% of the whole computing time window by default. float tsStreamComputDelayRatio = 0.1f; @@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1; int32_t tsRetrieveBlockingModel = 0; // last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name -int8_t tsKeepOriginalColumnName = 0; +int8_t tsKeepOriginalColumnName = 0; // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS; -int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; -int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP; +int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; +int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP; int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK; int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK; -int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds +int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION; -int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; -int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; -int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; -int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; -int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; -int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION; -int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; -int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; -int32_t tsMaxVgroupsPerDb = 0; +int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; +int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; +int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; +int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; +int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; +int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION; +int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; +int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; +int32_t tsMaxVgroupsPerDb = 0; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO; -// tsdb config +// tsdb config // For backward compatibility bool tsdbForceKeepFile = false; -bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly +bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB // balance int8_t tsEnableBalance = 1; int8_t tsAlternativeRole = 0; -int32_t tsBalanceInterval = 300; // seconds +int32_t tsBalanceInterval = 300; // seconds int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days int32_t tsMnodeEqualVnodeNum = 4; int8_t tsEnableFlowCtrl = 1; @@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1; int8_t tsHttpEnableRecordSql = 0; int8_t tsTelegrafUseFieldNum = 0; int8_t tsHttpDbNameMandatory = 0; +int32_t tsHttpKeepAlive = 30000; // mqtt int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default -char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; -char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; -char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; -char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; -char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; -char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = 
"/test"; // # +char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; +char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; +char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; +char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; +char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; +char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # // monitor int8_t tsEnableMonitorModule = 1; @@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey"; int32_t tsMonitorInterval = 30; // seconds // stream -int8_t tsEnableStream = 1; +int8_t tsEnableStream = 1; // internal int8_t tsCompactMnodeWal = 0; @@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0}; char tsScriptDir[PATH_MAX] = {0}; char tsTempDir[PATH_MAX] = "/tmp/"; -int32_t tsDiskCfgNum = 0; +int32_t tsDiskCfgNum = 0; int32_t tsTopicBianryLen = 16000; #ifndef _STORAGE @@ -231,42 +232,42 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS]; int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L}; // system info -char tsOsName[10] = "Linux"; -int64_t tsPageSize; -int64_t tsOpenMax; -int64_t tsStreamMax; -int32_t tsNumOfCores = 1; -float tsTotalTmpDirGB = 0; -float tsTotalDataDirGB = 0; -float tsAvailTmpDirectorySpace = 0; -float tsAvailDataDirGB = 0; -float tsUsedDataDirGB = 0; -float tsReservedTmpDirectorySpace = 1.0f; -float tsMinimalDataDirGB = 2.0f; -int32_t tsTotalMemoryMB = 0; +char tsOsName[10] = "Linux"; +int64_t tsPageSize; +int64_t tsOpenMax; +int64_t tsStreamMax; +int32_t tsNumOfCores = 1; +float tsTotalTmpDirGB = 0; +float tsTotalDataDirGB = 0; +float tsAvailTmpDirectorySpace = 0; +float tsAvailDataDirGB = 0; +float tsUsedDataDirGB = 0; +float tsReservedTmpDirectorySpace = 1.0f; +float tsMinimalDataDirGB = 2.0f; +int32_t tsTotalMemoryMB = 0; uint32_t tsVersion = 0; // log -int32_t tsNumOfLogLines = 10000000; -int32_t mDebugFlag = 131; -int32_t sdbDebugFlag = 131; -int32_t dDebugFlag = 135; -int32_t vDebugFlag = 135; +int32_t tsNumOfLogLines = 10000000; +int32_t mDebugFlag = 131; +int32_t sdbDebugFlag = 131; +int32_t dDebugFlag = 135; +int32_t vDebugFlag = 135; uint32_t cDebugFlag = 131; -int32_t jniDebugFlag = 131; -int32_t odbcDebugFlag = 131; -int32_t httpDebugFlag = 131; -int32_t mqttDebugFlag = 131; -int32_t monDebugFlag = 131; +int32_t jniDebugFlag = 131; +int32_t odbcDebugFlag = 131; +int32_t httpDebugFlag = 131; +int32_t mqttDebugFlag = 131; +int32_t monDebugFlag = 131; uint32_t qDebugFlag = 131; -int32_t rpcDebugFlag = 131; -int32_t uDebugFlag = 131; -int32_t debugFlag = 0; -int32_t sDebugFlag = 135; -int32_t wDebugFlag = 135; -int32_t tsdbDebugFlag = 131; -int32_t cqDebugFlag = 131; -int32_t fsDebugFlag = 135; +int32_t rpcDebugFlag = 131; +int32_t uDebugFlag = 131; +int32_t debugFlag = 0; +int32_t sDebugFlag = 135; +int32_t wDebugFlag = 135; +int32_t tsdbDebugFlag = 131; +int32_t cqDebugFlag = 131; +int32_t fsDebugFlag = 135; int8_t tsClientMerge = 0; @@ -274,13 +275,14 @@ int8_t tsClientMerge = 0; // // lossy compress 6 // -char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress. -// below option can take effect when tsLossyColumns not empty -double fPrecision = 1E-8; // float column precision -double dPrecision = 1E-16; // double column precision -uint32_t maxRange = 500; // max range -uint32_t curRange = 100; // range -char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR +char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. 
set empty + // to disable lossy compression. +// the options below take effect only when lossyColumns is not empty +double fPrecision = 1E-8; // float column precision +double dPrecision = 1E-16; // double column precision +uint32_t maxRange = 500; // max range +uint32_t curRange = 100; // range +char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR #endif // long query death-lock @@ -288,6 +290,7 @@ int8_t tsDeadLockKillQuery = 0; // default JSON string type char tsDefaultJSONStrType[7] = "binary"; +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user-defined child table name can be specified in tag value. If set to empty, the system generates the table name using an MD5 hash. int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -298,7 +301,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"}; static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT; void taosSetAllDebugFlag() { - if (debugFlag != 0) { + if (debugFlag != 0) { mDebugFlag = debugFlag; sdbDebugFlag = debugFlag; dDebugFlag = debugFlag; @@ -309,7 +312,7 @@ void taosSetAllDebugFlag() { httpDebugFlag = debugFlag; mqttDebugFlag = debugFlag; monDebugFlag = debugFlag; - qDebugFlag = debugFlag; + qDebugFlag = debugFlag; rpcDebugFlag = debugFlag; uDebugFlag = debugFlag; sDebugFlag = debugFlag; @@ -321,12 +324,12 @@ } bool taosCfgDynamicOptions(char *msg) { - char *option, *value; - int32_t olen, vlen; - int32_t vint = 0; + char *option, *value; + int32_t olen, vlen; + int32_t vint = 0; paGetToken(msg, &option, &olen); - if (olen == 0) return false;; + if (olen == 0) return false; paGetToken(option + olen + 1, &value, &vlen); if (vlen == 0) @@ -339,9 +343,9 @@ bool taosCfgDynamicOptions(char *msg) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; - //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; + // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue; - + int32_t cfgLen = (int32_t)strlen(cfg->option); if (cfgLen != olen) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue; @@ -370,7 +374,7 @@ bool taosCfgDynamicOptions(char *msg) { return true; } if (strncasecmp(cfg->option, "debugFlag", olen) == 0) { - taosSetAllDebugFlag(); + taosSetAllDebugFlag(); } return true; } @@ -427,7 +431,7 @@ static void taosCheckDataDirCfg() { } static int32_t taosCheckTmpDir(void) { - if (strlen(tsTempDir) <= 0){ + if (strlen(tsTempDir) <= 0) { uError("tempDir is not set"); return -1; } @@ -448,7 +452,7 @@ static void doInitGlobalConfig(void) { srand(taosSafeRand()); SGlobalCfg cfg = {0}; - + // ip address cfg.option = "firstEp"; cfg.ptr = tsFirst; @@ -577,12 +581,12 @@ static void doInitGlobalConfig(void) { cfg.ptr = &tsMaxNumOfDistinctResults; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; - cfg.minValue = 10*10000; - cfg.maxValue = 10000*10000; + cfg.minValue = 10 * 10000; + cfg.maxValue = 10000 * 10000; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1189,7 +1193,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - // module configs + // module configs cfg.option = "flowctrl"; cfg.ptr = &tsEnableFlowCtrl; cfg.valType = TAOS_CFG_VTYPE_INT8; @@ -1320,6 
+1324,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // keep pContext in cache (milliseconds) + cfg.option = "httpKeepAlive"; + cfg.ptr = &tsHttpKeepAlive; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 3000; + cfg.maxValue = 3600000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // debug flag cfg.option = "numOfLogLines"; cfg.ptr = &tsNumOfLogLines; @@ -1401,7 +1416,6 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "sdbDebugFlag"; cfg.ptr = &sdbDebugFlag; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1633,7 +1647,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - // enable kill long query + // enable kill long query cfg.option = "deadLockKillQuery"; cfg.ptr = &tsDeadLockKillQuery; cfg.valType = TAOS_CFG_VTYPE_INT8; @@ -1665,6 +1679,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // child table name specified in schemaless tag value + cfg.option = "smlChildTableName"; + cfg.ptr = tsSmlChildTableName; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(tsSmlChildTableName); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks cfg.option = "walFlushSize"; cfg.ptr = &tsdbWalFlushSize; @@ -1731,21 +1756,18 @@ static void doInitGlobalConfig(void) { #else assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); #endif - } -void taosInitGlobalCfg() { - pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); -} +void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); } int32_t taosCheckGlobalCfg() { - char fqdn[TSDB_FQDN_LEN]; + char fqdn[TSDB_FQDN_LEN]; uint16_t port; if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { taosSetAllDebugFlag(); } - + if (tsLocalFqdn[0] == 0) { taosGetFqdn(tsLocalFqdn); } @@ -1772,7 +1794,7 @@ int32_t taosCheckGlobalCfg() { if (taosCheckTmpDir()) { return -1; } - + taosGetSystemInfo(); tsSetLocale(); @@ -1794,8 +1816,8 @@ int32_t taosCheckGlobalCfg() { } if (tsMaxTablePerVnode < tsMinTablePerVnode) { - uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", - tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode); + uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode, + tsMinTablePerVnode, tsMinTablePerVnode); tsMaxTablePerVnode = tsMinTablePerVnode; } @@ -1817,7 +1839,7 @@ int32_t taosCheckGlobalCfg() { } tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035] - tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp + tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp tsSyncPort = tsServerPort + TSDB_PORT_SYNC; tsHttpPort = tsServerPort + TSDB_PORT_HTTP; @@ -1837,17 +1859,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { strcpy(fqdn, ep); char *temp = strchr(fqdn, ':'); - if (temp) { + if (temp) { *temp = 0; - *port = atoi(temp+1); - } - + *port = atoi(temp + 1); + } + if (*port == 0) { *port = tsServerPort; return -1; } - return 0; + return 0; } /* diff --git 
a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 065dedac63372f5c71146ee9937a6e136d71ce81..c5b59baefedc38fa4bf558526a8c4a1777bfb7bb 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.36-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 7caf46848d18c4491cdea1ab50df31d8d2d26daf..926a5ef483d9f1da07dbfdeb796567d3ea077c87 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.35 + 2.0.36 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index a586879afe61b9272712a14f36c60fbd85ba80ed..04115e2a0ebc5924a51862cd9a49a5352cf6a5b6 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.35 + 2.0.36 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -58,6 +58,13 @@ 4.13.1 test + + + commons-logging + commons-logging + 1.2 + test + @@ -70,6 +77,18 @@ + + org.apache.maven.plugins + maven-source-plugin + + + attach-sources + + jar + + + + org.apache.maven.plugins maven-assembly-plugin diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 307451e014c59c1c3419f1a9daff4f89e8b90d46..0fef64a6f82706e30677ad4e74604924c5cc2e60 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -135,7 +135,6 @@ public class TSDBDriver extends AbstractDriver { TSDBJNIConnector.init(props); return new TSDBConnection(props, this.dbMetaData); } catch (SQLWarning sqlWarning) { - sqlWarning.printStackTrace(); return new TSDBConnection(props, this.dbMetaData); } catch (SQLException sqlEx) { throw sqlEx; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 22fb0c4ae4987ade0a406fe5628bf80d975f3ae5..42ebedf4027b0e333b9e79b8045f1bae0d338ac7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -36,15 +36,15 @@ import java.util.regex.Pattern; * compatibility needs. 
*/ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { + // for jdbc preparedStatement interface private String rawSql; private Object[] parameters; - - private ArrayList colData; + // for parameter binding + private long nativeStmtHandle = 0; + private String tableName; private ArrayList tableTags; private int tagValueLength; - - private String tableName; - private long nativeStmtHandle = 0; + private ArrayList colData; TSDBPreparedStatement(TSDBConnection connection, String sql) { super(connection); @@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat preprocessSql(); } - /* - * - */ - /** * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in @@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setObject(int parameterIndex, Object x) throws SQLException { - if (isClosed()) { + if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - if (parameterIndex < 1 && parameterIndex >= parameters.length) { + if (parameterIndex < 1 && parameterIndex >= parameters.length) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - } parameters[parameterIndex - 1] = x; } @@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - // TODO: throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - //TODO: throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } @Override @@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat /////////////////////////////////////////////////////////////////////// // NOTE: the following APIs are not JDBC compatible - // set the bind table name + // parameter binding private static class ColumnInfo { @SuppressWarnings("rawtypes") private ArrayList data; @@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } - public void setTableName(String name) { + public void setTableName(String name) throws SQLException { + if (this.tableName != null) { + this.columnDataExecuteBatch(); + this.columnDataClearBatchInternal(); + } this.tableName = name; } @@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void columnDataExecuteBatch() throws SQLException { TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); connector.executeBatch(this.nativeStmtHandle); - 
this.columnDataClearBatch(); + this.columnDataClearBatchInternal(); } + @Deprecated public void columnDataClearBatch() { + columnDataClearBatchInternal(); + } + + private void columnDataClearBatchInternal() { int size = this.colData.size(); this.colData.clear(); - this.colData.addAll(Collections.nCopies(size, null)); this.tableName = null; // clear the table name } + public void columnDataCloseBatch() throws SQLException { TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); connector.closeBatch(this.nativeStmtHandle); @@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.nativeStmtHandle = 0L; this.tableName = null; } + + @Override + public void close() throws SQLException { + this.columnDataClearBatchInternal(); + this.columnDataCloseBatch(); + super.close(); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index d5985756ee1851407bf19a568657fa2127d0be43..36714893e3ca519dea07910a95d5ee1c1b6fb731 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -50,9 +50,13 @@ public class RestfulDriver extends AbstractDriver { String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName()); loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; } catch (UnsupportedEncodingException e) { - e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); } + int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); + boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); + + HttpClientPoolUtil.init(poolSize, keepAlive); String result = HttpClientPoolUtil.execute(loginUrl); JSONObject jsonResult = JSON.parseObject(result); String status = jsonResult.getString("status"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java index 99e46bc64f44f6326aec12734849cc5ef518c903..fc116b32c2a154c9479e4933d887ac7ddcedbe9f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java @@ -5,12 +5,11 @@ import com.taosdata.jdbc.TSDBErrorNumbers; import org.apache.http.HeaderElement; import org.apache.http.HeaderElementIterator; import org.apache.http.HttpEntity; -import org.apache.http.NoHttpResponseException; import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.HttpRequestRetryHandler; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.*; import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.conn.ClientConnectionManager; import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; @@ -21,21 
+20,20 @@ import org.apache.http.protocol.HTTP; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; -import javax.net.ssl.SSLException; import java.io.IOException; -import java.io.InterruptedIOException; -import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.sql.SQLException; +import java.util.concurrent.TimeUnit; public class HttpClientPoolUtil { private static final String DEFAULT_CONTENT_TYPE = "application/json"; private static final int DEFAULT_MAX_RETRY_COUNT = 5; - private static final int DEFAULT_MAX_TOTAL = 50; - private static final int DEFAULT_MAX_PER_ROUTE = 5; + public static final String DEFAULT_HTTP_KEEP_ALIVE = "true"; + public static final String DEFAULT_MAX_PER_ROUTE = "20"; private static final int DEFAULT_HTTP_KEEP_TIME = -1; + private static String isKeepAlive; private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); @@ -55,36 +53,39 @@ public class HttpClientPoolUtil { private static CloseableHttpClient httpClient; - static { - - PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); - connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); - connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); - - httpClient = HttpClients.custom() - .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) - .setConnectionManager(connectionManager) - .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT) - .build(); + public static void init(Integer connPoolSize, boolean keepAlive) { + if (httpClient == null) { + synchronized (HttpClientPoolUtil.class) { + if (httpClient == null) { + isKeepAlive = keepAlive ? 
HTTP.CONN_KEEP_ALIVE : HTTP.CONN_CLOSE; + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + connectionManager.setMaxTotal(connPoolSize * 10); + connectionManager.setDefaultMaxPerRoute(connPoolSize); + httpClient = HttpClients.custom() + .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) + .setConnectionManager(connectionManager) + .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT) + .build(); + } + } + } } /*** execute GET request ***/ public static String execute(String uri) throws SQLException { HttpEntity httpEntity = null; String responseBody = ""; - try { - HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); + HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); + HttpContext context = HttpClientContext.create(); + + try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) { httpEntity = httpResponse.getEntity(); if (httpEntity != null) { responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } } catch (ClientProtocolException e) { - e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); } catch (IOException exception) { - exception.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { @@ -94,30 +95,27 @@ public class HttpClientPoolUtil { return responseBody; } - /*** execute POST request ***/ public static String execute(String uri, String data, String token) throws SQLException { + + HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); + method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); + method.setHeader(HTTP.CONN_DIRECTIVE, isKeepAlive); + method.setHeader("Authorization", "Taosd " + token); + method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); + HttpContext context = HttpClientContext.create(); + HttpEntity httpEntity = null; String responseBody = ""; - try { - HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); - method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); - method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE); - method.setHeader("Authorization", "Taosd " + token); - - method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); + try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) { httpEntity = httpResponse.getEntity(); if (httpEntity == null) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data); } responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } catch (ClientProtocolException e) { - e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); } catch (IOException exception) { - exception.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { @@ -148,4 +146,12 @@ public class HttpClientPoolUtil { return method; } + + public static void 
reset() { + synchronized (HttpClientPoolUtil.class) { + ClientConnectionManager cm = httpClient.getConnectionManager(); + cm.closeExpiredConnections(); + cm.closeIdleConnections(100, TimeUnit.MILLISECONDS); + } + } } \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java index a427103770cff7f51355024688454824d7263c77..d4664f2678013b3de87bcd3f0dc24631be511ede 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java @@ -16,7 +16,6 @@ public class TaosInfo implements TaosInfoMBean { MBeanServer server = ManagementFactory.getPlatformMBeanServer(); ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo"); server.registerMBean(TaosInfo.getInstance(), name); - } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java index e1c4bddb2812f658336c895249886f603681e632..6cd1ff7200962b7347969e0b8b10443083505912 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -49,14 +49,9 @@ public class Utils { try { return parseMicroSecTimestamp(timeStampStr); } catch (DateTimeParseException ee) { - try { - return parseNanoSecTimestamp(timeStampStr); - } catch (DateTimeParseException eee) { - eee.printStackTrace(); - } + return parseNanoSecTimestamp(timeStampStr); } } - return null; } private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java new file mode 100644 index 0000000000000000000000000000000000000000..46f201d1c0a525f52014d133e25fc0db4741050c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java @@ -0,0 +1,139 @@ +package com.taosdata.jdbc; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; + +public class ParameterBindTest { + + private static final String host = "127.0.0.1"; + private static final String stable = "weather"; + + private Connection conn; + private final Random random = new Random(System.currentTimeMillis()); + + @Test + public void test() { + // given + String[] tbnames = {"t1", "t2", "t3"}; + int rows = 10; + + // when + insertIntoTables(tbnames, 10); + + // then + assertRows(stable, tbnames.length * rows); + for (String t : tbnames) { + assertRows(t, rows); + } + } + + @Test + public void testMultiThreads() { + // given + String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}}; + int rows = 10; + + // when + List threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList()); + threads.forEach(Thread::start); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + // then + for 
(String[] table : tables) { + for (String t : table) { + assertRows(t, rows); + } + } + + } + + private void assertRows(String tbname, int rows) { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + tbname); + while (rs.next()) { + int count = rs.getInt(1); + Assert.assertEquals(rows, count); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void insertIntoTables(String[] tbnames, int rowsEachTable) { + long current = System.currentTimeMillis(); + String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)"; + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + for (int i = 0; i < tbnames.length; i++) { + pstmt.setTableName(tbnames[i]); + pstmt.setTagInt(0, random.nextInt(100)); + pstmt.setTagInt(1, random.nextInt(100)); + + ArrayList timestampList = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + timestampList.add(current + i * 1000 + j); + } + pstmt.setTimestamp(0, timestampList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + f1List.add(random.nextInt(100)); + } + pstmt.setInt(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + f2List.add(random.nextInt(100)); + } + pstmt.setInt(2, f2List); + + pstmt.columnDataAddBatch(); + } + + pstmt.columnDataExecuteBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + try { + conn = DriverManager.getConnection(url); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test_pd"); + stmt.execute("create database if not exists test_pd"); + stmt.execute("use test_pd"); + stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { +// Statement stmt = conn.createStatement(); +// stmt.execute("drop database if exists test_pd"); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java new file mode 100644 index 0000000000000000000000000000000000000000..30fc2fa76597c30b905db5c9d49815189d71aaa3 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java @@ -0,0 +1,57 @@ +package com.taosdata.jdbc.rs; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class HttpKeepAliveTest { + + private static final String host = "127.0.0.1"; + + @Test + public void test() throws SQLException { + //given + int multi = 4000; + AtomicInteger exceptionCount = new AtomicInteger(); + + //when + Properties props = new Properties(); + props.setProperty("httpKeepAlive", "false"); + props.setProperty("httpPoolSize", "20"); + Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", props); + + List threads = IntStream.range(0, 
multi).mapToObj(i -> new Thread( + () -> { + try (Statement stmt = connection.createStatement()) { + stmt.execute("insert into log.tb_not_exists values(now, 1)"); + stmt.execute("select last(*) from log.dn"); + } catch (SQLException throwables) { + exceptionCount.getAndIncrement(); + } + } + )).collect(Collectors.toList()); + + threads.forEach(Thread::start); + + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + //then + Assert.assertEquals(multi, exceptionCount.get()); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java index 693a8f8eb42a29db1d3dd5120dbcb632acc28bb4..a78284b7a2ecf1b43b96180fa9d819e89ecdc595 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java @@ -6,8 +6,7 @@ import java.sql.*; public class WasNullTest { - // private static final String host = "127.0.0.1"; - private static final String host = "master"; + private static final String host = "127.0.0.1"; private Connection conn; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java index cae33f18e7a04e443092d8e696bb32be9600a435..7ba1607fdd32a594bca22528dee48d902736c703 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java @@ -2,8 +2,6 @@ package com.taosdata.jdbc.utils; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.TSDBError; import org.junit.Test; import java.io.UnsupportedEncodingException; @@ -11,7 +9,6 @@ import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.sql.SQLException; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -20,18 +17,21 @@ public class HttpClientPoolUtilTest { String user = "root"; String password = "taosdata"; String host = "127.0.0.1"; - String dbname = "log"; +// String host = "master"; @Test - public void test() { + public void useLog() { // given - List threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> { - useDB(); -// try { -// TimeUnit.SECONDS.sleep(10); -// } catch (InterruptedException e) { -// e.printStackTrace(); -// } + int multi = 10; + + // when + List threads = IntStream.range(0, multi).mapToObj(i -> new Thread(() -> { + try { + String token = login(multi); + executeOneSql("use log", token); + } catch (SQLException | UnsupportedEncodingException e) { + e.printStackTrace(); + } })).collect(Collectors.toList()); threads.forEach(Thread::start); @@ -43,34 +43,62 @@ public class HttpClientPoolUtilTest { e.printStackTrace(); } } - } - private void useDB() { - try { - user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName()); - password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName()); - String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + ""; - String result = HttpClientPoolUtil.execute(loginUrl); - JSONObject jsonResult = JSON.parseObject(result); - String status = jsonResult.getString("status"); - String token = jsonResult.getString("desc"); - if 
(!status.equals("succ")) { - throw new SQLException(jsonResult.getString("desc")); + @Test + public void tableNotExist() { + // given + int multi = 20; + + // when + List threads = IntStream.range(0, multi * 25).mapToObj(i -> new Thread(() -> { + try { +// String token = "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"; + String token = login(multi); + executeOneSql("insert into log.tb_not_exist values(now, 1)", token); + executeOneSql("select last(*) from log.dn", token); + } catch (SQLException | UnsupportedEncodingException e) { + e.printStackTrace(); } + })).collect(Collectors.toList()); - String url = "http://" + host + ":6041/rest/sql"; - String sql = "use " + dbname; - result = HttpClientPoolUtil.execute(url, sql, token); + threads.forEach(Thread::start); - JSONObject resultJson = JSON.parseObject(result); - if (resultJson.getString("status").equals("error")) { - throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); } - } catch (UnsupportedEncodingException | SQLException e) { - e.printStackTrace(); } } + private String login(int connPoolSize) throws SQLException, UnsupportedEncodingException { + user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName()); + password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName()); + String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + ""; + HttpClientPoolUtil.init(connPoolSize, false); + String result = HttpClientPoolUtil.execute(loginUrl); + JSONObject jsonResult = JSON.parseObject(result); + String status = jsonResult.getString("status"); + String token = jsonResult.getString("desc"); + if (!status.equals("succ")) { + throw new SQLException(jsonResult.getString("desc")); + } + return token; + } + + private boolean executeOneSql(String sql, String token) throws SQLException { + String url = "http://" + host + ":6041/rest/sql"; + String result = HttpClientPoolUtil.execute(url, sql, token); + JSONObject resultJson = JSON.parseObject(result); + if (resultJson.getString("status").equals("error")) { +// HttpClientPoolUtil.reset(); +// throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + return false; + } + return true; + } + } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/resources/commons-logging.properties b/src/connector/jdbc/src/test/resources/commons-logging.properties new file mode 100644 index 0000000000000000000000000000000000000000..ac435a2a1bd64ca9925948d486b453638cb8caac --- /dev/null +++ b/src/connector/jdbc/src/test/resources/commons-logging.properties @@ -0,0 +1,2 @@ +#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog + diff --git a/src/connector/jdbc/src/test/resources/simplelog.properties b/src/connector/jdbc/src/test/resources/simplelog.properties new file mode 100644 index 0000000000000000000000000000000000000000..abcc1ef6d56112c892377ca47453b65ed924a9a9 --- /dev/null +++ b/src/connector/jdbc/src/test/resources/simplelog.properties @@ -0,0 +1,5 @@ +org.apache.commons.logging.simplelog.defaultlog=TRACE +org.apache.commons.logging.simplelog.showlogname=true +org.apache.commons.logging.simplelog.showShortLogname=restful +org.apache.commons.logging.simplelog.showdatetime=true +org.apache.commons.logging.simplelog.dateTimeFormat=yyyy-MM-dd HH:mm:ss.SSS \ No newline at end of file diff --git 
a/src/connector/python/README.md b/src/connector/python/README.md index b5d841601f20fbad5bdc1464d5d83f512b25dfc4..679735131105739ae59940c29b51f57496a2057d 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -5,14 +5,27 @@ ## Install -```sh -git clone --depth 1 https://github.com/taosdata/TDengine.git -pip install ./TDengine/src/connector/python +You can use `pip` to install the connector from PyPI: + +```bash +pip install taospy +``` + +Or install it from the git URL: + +```bash +pip install git+https://github.com/taosdata/taos-connector-python.git +``` + +If you have installed the TDengine server or client from prebuilt packages, you can install the connector from the bundled path: + +```bash +pip install /usr/local/taos/connector/python ``` ## Source Code -[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python). +The source code of the [TDengine](https://github.com/taosdata/TDengine) connector for Python is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Examples diff --git a/src/connector/python/pyproject.toml b/src/connector/python/pyproject.toml index da61cccf49429251d49f2cba495e24e146244c85..69e3351712b647712a88d7067545ea12ed86506d 100644 --- a/src/connector/python/pyproject.toml +++ b/src/connector/python/pyproject.toml @@ -1,10 +1,13 @@ [tool.poetry] -name = "taos" -version = "2.1.1" +name = "taospy" +version = "2.1.2" description = "TDengine connector for python" authors = ["Taosdata Inc. "] license = "AGPL-3.0" readme = "README.md" +packages = [ + {include = "taos"} +] [tool.poetry.dependencies] python = "^2.7 || ^3.4" @@ -12,12 +15,12 @@ typing = "*" [tool.poetry.dev-dependencies] pytest = [ - { version = "^4.6", python = "^2.7" }, - { version = "^6.2", python = "^3.7" } + { version = "^4.6", python = ">=2.7,<3.0" }, + { version = "^6.2", python = ">=3.7,<4.0" } ] pdoc = { version = "^7.1.1", python = "^3.7" } mypy = { version = "^0.910", python = "^3.6" } -black = { version = "^21.7b0", python = "^3.6" } +black = [{ version = "^21.*", python = ">=3.6.2,<4.0" }] [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py index 2520984e78fad236227d9cf55c29ace92878d3bf..7ebfa8adef6a82c979ad0544a3eb11ccd351b760 100644 --- a/src/connector/python/taos/__init__.py +++ b/src/connector/python/taos/__init__.py @@ -442,18 +442,14 @@ from .statement import * from .subscription import * from .schemaless import * -try: - import importlib.metadata - - __version__ = importlib.metadata.version("taos") -except: - None +from taos._version import __version__ # Globals threadsafety = 0 paramstyle = "pyformat" __all__ = [ + "__version__", # functions "connect", "new_bind_param", diff --git a/src/connector/python/taos/_version.py b/src/connector/python/taos/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f811561263c557cf534e90ff763373bccacb20b6 --- /dev/null +++ b/src/connector/python/taos/_version.py @@ -0,0 +1 @@ +__version__ = '2.1.2' diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 4365c7eabc509f95525078378ff76d46a884c075..37bc90d4c63fe3f75b12d46bb1bf535441869938 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -2,8 +2,9 @@ import ctypes import platform -import sys +import inspect from ctypes import * + try: from typing import Any except: @@ 
-14,6 +15,7 @@ from .bind import * from .field import * from .schemaless import * +_UNSUPPORTED = {} # stream callback stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p) @@ -47,10 +49,13 @@ def _load_taos(): "Darwin": _load_taos_darwin, "Windows": _load_taos_windows, } + pf = platform.system() + if load_func[pf] is None: + raise InterfaceError("unsupported platform: %s" % pf) try: - return load_func[platform.system()]() - except: - raise InterfaceError('unsupported platform or failed to load taos client library') + return load_func[pf]() + except Exception as err: + raise InterfaceError("unable to load taos C library: %s" % err) _libtaos = _load_taos() @@ -65,7 +70,6 @@ _libtaos.taos_consume.restype = ctypes.c_void_p _libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int) _libtaos.taos_free_result.restype = None _libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) -_libtaos.taos_schemaless_insert.restype = ctypes.c_void_p try: _libtaos.taos_stmt_errstr.restype = c_char_p @@ -181,6 +185,7 @@ def taos_connect(host=None, user="root", password="taosdata", db=None, port=0): raise ConnectionError("connect to TDengine failed") return connection + _libtaos.taos_connect_auth.restype = c_void_p _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16 @@ -236,6 +241,7 @@ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0): raise ConnectionError("connect to TDengine failed") return connection + _libtaos.taos_query.restype = c_void_p _libtaos.taos_query.argtypes = c_void_p, c_char_p @@ -287,6 +293,7 @@ def taos_affected_rows(result): """The affected rows after runing query""" return _libtaos.taos_affected_rows(result) + subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int) _libtaos.taos_subscribe.restype = c_void_p # _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int @@ -317,7 +324,7 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par _libtaos.taos_consume.restype = c_void_p -_libtaos.taos_consume.argstype = c_void_p, +_libtaos.taos_consume.argstype = (c_void_p,) def taos_consume(sub): @@ -503,13 +510,17 @@ def taos_stop_query(result): return _libtaos.taos_stop_query(result) -_libtaos.taos_load_table_info.restype = c_int -_libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p) +try: + _libtaos.taos_load_table_info.restype = c_int + _libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p) +except Exception as err: + _UNSUPPORTED["taos_load_table_info"] = err def taos_load_table_info(connection, tables): # type: (ctypes.c_void_p, str) -> None """Stop current query""" + _check_if_supported() errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8"))) if errno != 0: msg = taos_errstr() @@ -562,12 +573,13 @@ def taos_select_db(connection, db): try: _libtaos.taos_open_stream.restype = c_void_p _libtaos.taos_open_stream.argstype = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, Any -except: - pass +except Exception as err: + _UNSUPPORTED["taos_open_stream"] = err def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None): # type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer + _check_if_supported() if callback2 != None: callback2 = stream_callback2_type(callback2) """Open an stream""" @@ -600,6 +612,7 @@ def taos_stmt_init(connection): """ return c_void_p(_libtaos.taos_stmt_init(connection)) 
+
 _libtaos.taos_stmt_prepare.restype = c_int
 _libtaos.taos_stmt_prepare.argstype = (c_void_p, c_char_p, c_int)
@@ -618,6 +631,7 @@ def taos_stmt_prepare(stmt, sql):

 _libtaos.taos_stmt_close.restype = c_int
 _libtaos.taos_stmt_close.argstype = (c_void_p,)

+
 def taos_stmt_close(stmt):
     # type: (ctypes.c_void_p) -> None
     """Close a statement query
@@ -627,17 +641,12 @@ def taos_stmt_close(stmt):
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)

-try:
-    _libtaos.taos_stmt_errstr.restype = c_char_p
-    _libtaos.taos_stmt_errstr.argstype = (c_void_p,)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())

 try:
     _libtaos.taos_stmt_errstr.restype = c_char_p
     _libtaos.taos_stmt_errstr.argstype = (c_void_p,)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())
+except Exception as err:
+    _UNSUPPORTED["taos_stmt_errstr"] = err

 def taos_stmt_errstr(stmt):
@@ -645,16 +654,17 @@ def taos_stmt_errstr(stmt):
     """Get error message from stetement query
     @stmt: c_void_p TAOS_STMT*
     """
+    _check_if_supported()
     err = c_char_p(_libtaos.taos_stmt_errstr(stmt))
     if err:
         return err.value.decode("utf-8")

+
 try:
     _libtaos.taos_stmt_set_tbname.restype = c_int
     _libtaos.taos_stmt_set_tbname.argstype = (c_void_p, c_char_p)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname" % taos_get_client_info())
-
+except Exception as err:
+    _UNSUPPORTED["taos_stmt_set_tbname"] = err

 def taos_stmt_set_tbname(stmt, name):
@@ -662,15 +672,17 @@ def taos_stmt_set_tbname(stmt, name):
     """Set table name of a statement query if exists.
     @stmt: c_void_p TAOS_STMT*
     """
+    _check_if_supported()
     res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8")))
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)

+
 try:
     _libtaos.taos_stmt_set_tbname_tags.restype = c_int
     _libtaos.taos_stmt_set_tbname_tags.argstype = (c_void_p, c_char_p, c_void_p)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname_tags" % taos_get_client_info())
+except Exception as err:
+    _UNSUPPORTED["taos_stmt_set_tbname_tags"] = err

 def taos_stmt_set_tbname_tags(stmt, name, tags):
@@ -678,11 +690,13 @@ def taos_stmt_set_tbname_tags(stmt, name, tags):
     """Set table name with tags bind params.
     @stmt: c_void_p TAOS_STMT*
     """
+    _check_if_supported()
     res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags)
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)

+
 _libtaos.taos_stmt_is_insert.restype = c_int
 _libtaos.taos_stmt_is_insert.argstype = (c_void_p, POINTER(c_int))
@@ -702,6 +716,7 @@ def taos_stmt_is_insert(stmt):

 _libtaos.taos_stmt_num_params.restype = c_int
 _libtaos.taos_stmt_num_params.argstype = (c_void_p, POINTER(c_int))

+
 def taos_stmt_num_params(stmt):
     # type: (ctypes.c_void_p) -> int
     """Params number of the current statement query.
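As a side note on the declarations in these hunks: ctypes only honors the attribute spellings restype and argtypes when marshalling a call; an attribute written as argstype is stored on the function object but otherwise ignored. A minimal sketch against libc's strlen, chosen purely for illustration and assuming a typical Linux/macOS environment:

    import ctypes
    import ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"))
    libc.strlen.restype = ctypes.c_size_t      # declare the return type
    libc.strlen.argtypes = (ctypes.c_char_p,)  # note: argtypes, not argstype

    print(libc.strlen(b"taosdata"))  # -> 8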
@@ -713,6 +728,7 @@ def taos_stmt_num_params(stmt):
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
     return num_params.value

+
 _libtaos.taos_stmt_bind_param.restype = c_int
 _libtaos.taos_stmt_bind_param.argstype = (c_void_p, c_void_p)
@@ -729,12 +745,12 @@ def taos_stmt_bind_param(stmt, bind):
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)

+
 try:
     _libtaos.taos_stmt_bind_param_batch.restype = c_int
     _libtaos.taos_stmt_bind_param_batch.argstype = (c_void_p, c_void_p)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_bind_param_batch" % taos_get_client_info())
-
+except Exception as err:
+    _UNSUPPORTED["taos_stmt_bind_param_batch"] = err

 def taos_stmt_bind_param_batch(stmt, bind):
@@ -745,15 +761,17 @@ def taos_stmt_bind_param_batch(stmt, bind):
     """
     # ptr = ctypes.cast(bind, POINTER(TaosMultiBind))
     # ptr = pointer(bind)
+    _check_if_supported()
     res = _libtaos.taos_stmt_bind_param_batch(stmt, bind)
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)

+
 try:
     _libtaos.taos_stmt_bind_single_param_batch.restype = c_int
     _libtaos.taos_stmt_bind_single_param_batch.argstype = (c_void_p, c_void_p, c_int)
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_stmt_bind_single_param_batch" % taos_get_client_info())
+except Exception as err:
+    _UNSUPPORTED["taos_stmt_bind_single_param_batch"] = err

 def taos_stmt_bind_single_param_batch(stmt, bind, col):
@@ -763,6 +781,7 @@ def taos_stmt_bind_single_param_batch(stmt, bind, col):
     @bind: TAOS_MULTI_BIND*
     @col: column index
     """
+    _check_if_supported()
     res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col)
     if res != 0:
         raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
@@ -810,14 +829,17 @@ def taos_stmt_use_result(stmt):
         raise StatementError(taos_stmt_errstr(stmt))
     return result

+
 try:
     _libtaos.taos_schemaless_insert.restype = c_void_p
     _libtaos.taos_schemaless_insert.argstype = c_void_p, c_void_p, c_int, c_int, c_int
-except AttributeError:
-    print("WARNING: libtaos(%s) does not support taos_schemaless_insert" % taos_get_client_info())
+except Exception as err:
+    _UNSUPPORTED["taos_schemaless_insert"] = err

+
 def taos_schemaless_insert(connection, lines, protocol, precision):
     # type: (c_void_p, list[str] | tuple(str), SmlProtocol, SmlPrecision) -> int
+    _check_if_supported()
     num_of_lines = len(lines)
     lines = (c_char_p(line.encode("utf-8")) for line in lines)
     lines_type = ctypes.c_char_p * num_of_lines
@@ -833,6 +855,18 @@ def taos_schemaless_insert(connection, lines, protocol, precision):
     taos_free_result(res)
     return affected_rows

+
+def _check_if_supported():
+    func = inspect.stack()[1][3]
+    if func in _UNSUPPORTED:
+        raise InterfaceError("C function %s is not supported in v%s: %s" % (func, taos_get_client_info(), _UNSUPPORTED[func]))
+
+
+def unsupported_methods():
+    for m, e in _UNSUPPORTED.items():
+        print("unsupported %s: %s" % (m, e))
+
+
 class CTaosInterface(object):
     def __init__(self, config=None):
         """
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 5b48374e8f7d54bef4d199ff9398aaf6a74b257e..1daff0c75956072e02f8439acac2850b9315235a 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -3,6 +3,7 @@ PROJECT(TDengine)

 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
 INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
 INCLUDE_DIRECTORIES(inc)

 AUX_SOURCE_DIRECTORY(. SRC)
@@ -61,12 +62,22 @@ ENDIF ()
 MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
 ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")

+LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
+
 IF (TD_LINUX)
     ADD_EXECUTABLE(taosdump ${SRC})
     IF (TD_SOMODE_STATIC)
-        TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+        IF (TD_AVRO_SUPPORT)
+            TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
+        ELSE ()
+            TARGET_LINK_LIBRARIES(taosdump taos_static)
+        ENDIF()
     ELSE ()
-        TARGET_LINK_LIBRARIES(taosdump taos cJson)
+        IF (TD_AVRO_SUPPORT)
+            TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
+        ELSE ()
+            TARGET_LINK_LIBRARIES(taosdump taos)
+        ENDIF ()
     ENDIF ()
 ENDIF ()

@@ -74,8 +85,8 @@ IF (TD_DARWIN)
     # missing for macosx
     # ADD_EXECUTABLE(taosdump ${SRC})
     # IF (TD_SOMODE_STATIC)
-    #     TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+    #     TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
     # ELSE ()
-    #     TARGET_LINK_LIBRARIES(taosdump taos cJson)
+    #     TARGET_LINK_LIBRARIES(taosdump taos jansson)
     # ENDIF ()
 ENDIF ()
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 69ec2968218a9e5b2ca34551c60b6c44256298d2..a1f98dbcd2283f1f8df3daa780ef9dd1c5f608f8 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -28,15 +28,24 @@
 #include "tsdb.h"
 #include "tutil.h"

-#define AVRO_SUPPORT 0
-#if AVRO_SUPPORT == 1
+static char **g_tsDumpInSqlFiles = NULL;
+static char g_tsCharset[63] = {0};
+
+#ifdef AVRO_SUPPORT
 #include <avro.h>
-#endif
+#include <jansson.h>
+
+static char **g_tsDumpInAvroFiles = NULL;
+
+static void print_json_aux(json_t *element, int indent);
+
+#endif /* AVRO_SUPPORT */

 #define TSDB_SUPPORT_NANOSECOND 1

 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
+#define MAX_PATH_LEN 4096 // max path length on linux is 4095
 #define COMMAND_SIZE 65536
 #define MAX_RECORDS_PER_REQ 32766
 //#define DEFAULT_DUMP_FILE "taosdump.sql"
@@ -46,8 +55,6 @@
 static int converStringToReadable(char *str, int size, char *buf, int bufsize);
 static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-static void dumpCharset(FILE *fp);
-static void loadFileCharset(FILE *fp, char *fcharset);

 typedef struct {
     short bytes;
@@ -64,7 +71,12 @@

 #define performancePrint(fmt, ...) \
     do { if (g_args.performance_print) \
-        fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
+        fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
+
+#define warnPrint(fmt, ...) \
+    do { fprintf(stderr, "\033[33m"); \
+        fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
+        fprintf(stderr, "\033[0m"); } while(0)

 #define errorPrint(fmt, ...) \
    do { fprintf(stderr, "\033[31m"); \
@@ -208,14 +220,13 @@ typedef struct {
 typedef struct {
     pthread_t threadID;
     int32_t threadIndex;
-    int32_t totalThreads;
     char dbName[TSDB_DB_NAME_LEN];
     char stbName[TSDB_TABLE_NAME_LEN];
     int precision;
     TAOS *taos;
     int64_t rowsOfDumpOut;
-    int64_t tablesOfDumpOut;
-    int64_t tableFrom;
+    int64_t count;
+    int64_t from;
 } threadInfo;

 typedef struct {
@@ -225,6 +236,44 @@ typedef struct {
     int32_t totalDatabasesOfDumpOut;
 } resultStatistics;

+#ifdef AVRO_SUPPORT
+
+enum enAvro_Codec {
+    AVRO_CODEC_START = 0,
+    AVRO_CODEC_NULL = AVRO_CODEC_START,
+    AVRO_CODEC_DEFLATE,
+    AVRO_CODEC_SNAPPY,
+    AVRO_CODEC_LZMA,
+    AVRO_CODEC_UNKNOWN = 255
+};
+
+char *g_avro_codec[] = {
+    "null",
+    "deflate",
+    "snappy",
+    "lzma",
+    "unknown"
+};
+
+/* avro section begin */
+#define RECORD_NAME_LEN 64
+#define FIELD_NAME_LEN 64
+#define TYPE_NAME_LEN 16
+
+typedef struct FieldStruct_S {
+    char name[FIELD_NAME_LEN];
+    char type[TYPE_NAME_LEN];
+} FieldStruct;
+
+typedef struct RecordSchema_S {
+    char name[RECORD_NAME_LEN];
+    char *fields;
+    int num_fields;
+} RecordSchema;
+
+/* avro section end */
+#endif
+
 static int64_t g_totalDumpOutRows = 0;

 SDbInfo **g_dbInfos = NULL;
@@ -276,14 +325,17 @@ static struct argp_option options[] = {
     // dump format options
     {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
     {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
-    {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
-    {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
-    {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
-    {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
-    {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
-    {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
-    {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
-    {"debug", 'g', 0, 0, "Print debug info.", 8},
+#ifdef AVRO_SUPPORT
+    {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
+    {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
+#endif
+    {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
+    {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
+    {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
+    {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
+    {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
+    {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 8.", 10},
+    {"debug", 'g', 0, 0, "Print debug info.", 15},
     {0}
 };
@@ -310,7 +362,10 @@ typedef struct arguments {
     // dump format option
     bool schemaonly;
     bool with_property;
+#ifdef AVRO_SUPPORT
     bool avro;
+    int avro_codec;
+#endif
     int64_t start_time;
     char humanStartTime[HUMAN_TIME_LEN];
     int64_t end_time;
@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0};
 static FILE *g_fpOfResult = NULL;
 static int g_numOfCores = 1;

-static int dumpOut();
-static int dumpIn();
-static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
-        FILE *fp);
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
-        FILE *fp, char* dbName);
-static int getTableDes(
-        char* dbName, char *table,
-        TableDef *stableDes, bool isSuperTable);
-static int64_t dumpTableData(FILE *fp, char *tbName,
-        char* dbName,
-        int precision,
-        char *jsonAvroSchema);
-static int checkParam();
-static void freeDbInfos();
-
 struct arguments g_args = {
     // connection option
     NULL,
@@ -381,7 +420,10 @@ struct arguments g_args = {
     // dump format option
     false,      // schemaonly
     true,       // with_property
-    false,      // avro format
+#ifdef AVRO_SUPPORT
    false,      // avro
+    AVRO_CODEC_SNAPPY,  // avro_codec
+#endif
    -INT64_MAX + 1, // start_time
    {0},        // humanStartTime
    INT64_MAX,  // end_time
@@ -392,7 +434,7 @@ struct arguments g_args = {
    1,          // table_batch
    false,      // allow_sys
    // other options
-    5,          // thread_num
+    8,          // thread_num
    0,          // abort
    NULL,       // arg_list
    0,          // arg_list_len
@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
            }
            break;

+#ifdef AVRO_SUPPORT
+        case 'v':
+            g_args.avro = true;
+            break;
+
+        case 'd':
+            for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
+                if (0 == strcmp(arg, g_avro_codec[i])) {
+                    g_args.avro_codec = i;
+                    break;
+                }
+            }
+            break;
+#endif
+
        case 'r':
            g_args.resultFile = arg;
            break;
@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
        case 'N':
            g_args.with_property = false;
            break;
-        case 'v':
-            g_args.avro = true;
-            break;
        case 'S':
            // parse time here.
break; @@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.abort = 1; break; case ARGP_KEY_ARG: - g_args.arg_list = &state->argv[state->next - 1]; - g_args.arg_list_len = state->argc - state->next + 1; + if (strlen(state->argv[state->next - 1])) { + g_args.arg_list = &state->argv[state->next - 1]; + g_args.arg_list_len = state->argc - state->next + 1; + } state->next = state->argc; break; @@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause( for (; counter < numOfCols; counter++) { if (counter != count_temp) { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); if (tableDes->cols[counter].var_value) { pstr += sprintf(pstr, ", \'%s\'", @@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause( pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); } } else { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); if (tableDes->cols[counter].var_value) { pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value); @@ -1050,1902 +1106,2768 @@ static void dumpCreateMTableClause( free(tmpBuf); } -static int convertTbDesToAvroSchema( - char *dbName, char *tbName, TableDef *tableDes, int colCount, - char **avroSchema) +static int64_t getNtbCountOfStb(char *dbName, char *stbName) { - errorPrint("%s() LN%d TODO: covert table schema to avro schema\n", - __func__, __LINE__); - // { - // "namesapce": "database name", - // "type": "record", - // "name": "table name", - // "fields": [ - // { - // "name": "col0 name", - // "type": "long" - // }, - // { - // "name": "col1 name", - // "type": ["int", "null"] - // }, - // { - // "name": "col2 name", - // "type": ["float", "null"] - // }, - // ... 
- // { - // "name": "coln name", - // "type": ["string", "null"] - // } - // ] - // } - *avroSchema = (char *)calloc(1, - 17 + TSDB_DB_NAME_LEN /* dbname section */ - + 17 /* type: record */ - + 11 + TSDB_TABLE_NAME_LEN /* tbname section */ - + 10 /* fields section */ - + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */ - if (*avroSchema == NULL) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, + dbName, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); return -1; } - char *pstr = *avroSchema; - pstr += sprintf(pstr, - "{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [", - dbName, tbName); - for (int i = 0; i < colCount; i ++) { - if (0 == i) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "long"); - } else { - if (strcasecmp(tableDes->cols[i].type, "binary") == 0 || - strcasecmp(tableDes->cols[i].type, "nchar") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", - tableDes->cols[i].field, "string"); - } else { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", - tableDes->cols[i].field, tableDes->cols[i].type); - } - } - if ((i != (colCount -1)) - && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) { - pstr += sprintf(pstr, "},"); - } else { - pstr += sprintf(pstr, "}"); - break; - } + int64_t count = 0; + + char command[COMMAND_SIZE]; + + sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); + + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; } - pstr += sprintf(pstr, "]}"); + TAOS_ROW row = NULL; - debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema); + if ((row = taos_fetch_row(res)) != NULL) { + count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; + } - return 0; + taos_close(taos); + return count; } -static int64_t dumpNormalTable( - char *dbName, - char *stable, - char *tbName, - int precision, - FILE *fp - ) { +static int getTableDes( + TAOS *taos, + char* dbName, char *table, + TableDef *tableDes, bool isSuperTable) { + TAOS_ROW row = NULL; + TAOS_RES* res = NULL; int colCount = 0; - TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) - + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table - colCount = getTableDes(dbName, tbName, tableDes, false); - - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } + char sqlstr[COMMAND_SIZE]; + sprintf(sqlstr, "describe %s.%s;", dbName, table); - // create child-table using super-table - dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); - } else { // dump table definition - colCount = getTableDes(dbName, tbName, tableDes, false); + res = taos_query(taos, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } + TAOS_FIELD *fields = taos_fetch_fields(res); - // create normal-table or super-table - dumpCreateTableClause(tableDes, colCount, fp, dbName); + tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); + while ((row = taos_fetch_row(res)) != NULL) { + tstrncpy(tableDes->cols[colCount].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + min(TSDB_COL_NAME_LEN + 1, + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); + tstrncpy(tableDes->cols[colCount].type, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1)); + tableDes->cols[colCount].length = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(tableDes->cols[colCount].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(COL_NOTE_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); + colCount++; } - char *jsonAvroSchema = NULL; - if (g_args.avro) { - if (0 != convertTbDesToAvroSchema( - dbName, tbName, tableDes, colCount, &jsonAvroSchema)) { - errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n", - __func__, - __LINE__); - freeTbDes(tableDes); - return -1; - } - } + taos_free_result(res); + res = NULL; - int64_t ret = 0; - if (!g_args.schemaonly) { - ret = dumpTableData(fp, tbName, dbName, precision, - jsonAvroSchema); + if (isSuperTable) { + return colCount; } - tfree(jsonAvroSchema); - freeTbDes(tableDes); - return ret; -} + // if child-table have tag, using select tagName from table to get tagValue + for (int i = 0 ; i < colCount; i++) { + if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; -static int64_t dumpNormalTableBelongStb( - SDbInfo *dbInfo, char *stbName, char *ntbName) -{ - int64_t count = 0; + sprintf(sqlstr, "select %s from %s.%s", + 
tableDes->cols[i].field, dbName, table); - char tmpBuf[4096] = {0}; - FILE *fp = NULL; + res = taos_query(taos, sqlstr); + code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } + fields = taos_fetch_fields(res); - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; - } + row = taos_fetch_row(res); + if (NULL == row) { + errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } - count = dumpNormalTable( - dbInfo->name, - stbName, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); + if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { + sprintf(tableDes->cols[i].note, "%s", "NUL"); + sprintf(tableDes->cols[i].value, "%s", "NULL"); + taos_free_result(res); + res = NULL; + continue; + } - fclose(fp); - return count; -} + int32_t* length = taos_fetch_lengths(res); -static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName) -{ - int64_t count = 0; - - char tmpBuf[4096] = {0}; - FILE *fp = NULL; + //int32_t* length = taos_fetch_lengths(tmpResult); + switch (fields[0].type) { + case TSDB_DATA_TYPE_BOOL: + sprintf(tableDes->cols[i].value, "%d", + ((((int32_t)(*((char *) + row[TSDB_SHOW_TABLES_NAME_INDEX])))==1) + ?1:0)); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(tableDes->cols[i].value, "%d", + *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(tableDes->cols[i].value, "%d", + *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_INT: + sprintf(tableDes->cols[i].value, "%d", + *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(tableDes->cols[i].value, "%" PRId64 "", + *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(tableDes->cols[i].value, "%f", + GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(tableDes->cols[i].value, "%f", + GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_BINARY: + memset(tableDes->cols[i].value, 0, + sizeof(tableDes->cols[i].value)); + int len = strlen((char *)row[0]); + // FIXME for long value + if (len < (COL_VALUEBUF_LEN - 2)) { + converStringToReadable( + (char *)row[0], + length[0], + tableDes->cols[i].value, + len); + } else { + tableDes->cols[i].var_value = calloc(1, len * 2); + if (tableDes->cols[i].var_value == NULL) { + errorPrint("%s() LN%d, memory alalocation failed!\n", + __func__, __LINE__); + taos_free_result(res); + return -1; + } + converStringToReadable((char *)row[0], + length[0], + (char *)(tableDes->cols[i].var_value), len); + } + break; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } + case TSDB_DATA_TYPE_NCHAR: + { + memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note)); + char tbuf[COMMAND_SIZE-2]; // need reserve 2 bytes for ' ' + convertNCharToReadable( + (char 
*)row[TSDB_SHOW_TABLES_NAME_INDEX], + length[0], tbuf, COMMAND_SIZE-2); + sprintf(tableDes->cols[i].value, "%s", tbuf); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); +#if 0 + if (!g_args.mysqlFlag) { + sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } +#endif + break; + default: + break; + } - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; + taos_free_result(res); } - count = dumpNormalTable( - dbInfo->name, - NULL, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); - - fclose(fp); - return count; + return colCount; } -static void *dumpNtbOfDb(void *arg) { - threadInfo *pThreadInfo = (threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); - debugPrint("dump table count = \t%"PRId64"\n", - pThreadInfo->tablesOfDumpOut); +static int dumpCreateTableClause(TableDef *tableDes, int numOfCols, + FILE *fp, char* dbName) { + int counter = 0; + int count_temp = 0; + char sqlstr[COMMAND_SIZE]; - FILE *fp = NULL; - char tmpBuf[4096] = {0}; + char* pstr = sqlstr; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%d.sql", - g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex); - } else { - sprintf(tmpBuf, "%s.%d.sql", - pThreadInfo->dbName, pThreadInfo->threadIndex); - } + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", + dbName, tableDes->name); - fp = fopen(tmpBuf, "w"); + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; - } + if (counter == 0) { + pstr += sprintf(pstr, " (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } - int64_t count; - for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) { - debugPrint("[%d] No.\t%"PRId64" table name: %s\n", - pThreadInfo->threadIndex, i, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name); - count = dumpNormalTable( - pThreadInfo->dbName, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name, - pThreadInfo->precision, - fp); - if (count < 0) { - break; + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); } } - fclose(fp); - return NULL; -} - -static void *dumpNormalTablesOfStb(void *arg) { - threadInfo *pThreadInfo = (threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); - debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut); - - char command[COMMAND_SIZE]; + count_temp = counter; - sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", - pThreadInfo->dbName, pThreadInfo->stbName, - pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom); + for (; counter < 
numOfCols; counter++) { + if (counter == count_temp) { + pstr += sprintf(pstr, ") TAGS (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - int32_t code = taos_errno(res); - if (code) { - errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return NULL; + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } } - FILE *fp = NULL; - char tmpBuf[4096] = {0}; + pstr += sprintf(pstr, ");"); - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.%d.sql", - g_args.outpath, - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } else { - sprintf(tmpBuf, "%s.%s.%d.sql", - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } + debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr); + return fprintf(fp, "%s\n\n", sqlstr); +} - fp = fopen(tmpBuf, "w"); +static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp) +{ + uint64_t sizeOfTableDes = + (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; + TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes); + if (NULL == tableDes) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, sizeOfTableDes); + exit(-1); } - TAOS_ROW row = NULL; - int64_t i = 0; - int64_t count; - while((row = taos_fetch_row(res)) != NULL) { - debugPrint("[%d] sub table %"PRId64": name: %s\n", - pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + int colCount = getTableDes(taos, dbInfo->name, + stbName, tableDes, true); - count = dumpNormalTable( - pThreadInfo->dbName, - pThreadInfo->stbName, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - pThreadInfo->precision, - fp); - if (count < 0) { - break; - } + if (colCount < 0) { + free(tableDes); + errorPrint("%s() LN%d, failed to get stable[%s] schema\n", + __func__, __LINE__, stbName); + exit(-1); } - fclose(fp); - return NULL; + dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name); + free(tableDes); + + return 0; } -static int64_t dumpNtbOfDbByThreads( - SDbInfo *dbInfo, - int64_t ntbCount) +static int64_t dumpCreateSTableClauseOfDb( + SDbInfo *dbInfo, FILE *fp) { - if (ntbCount <= 0) { + TAOS *taos = taos_connect(g_args.host, + g_args.user, g_args.password, dbInfo->name, g_args.port); + if (NULL == taos) { + errorPrint( + "Failed to connect to TDengine server %s by specified database %s\n", + g_args.host, dbInfo->name); return 0; } - int threads = g_args.thread_num; + TAOS_ROW row; + char command[COMMAND_SIZE] = {0}; - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } + sprintf(command, "SHOW %s.STABLES", dbInfo->name); - assert(threads); - int64_t b = ntbCount % threads; - - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - assert(pids); - assert(infos); - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - 
dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); + TAOS_RES* res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + exit(-1); + } - return -1; + int64_t superTblCnt = 0; + while ((row = taos_fetch_row(res)) != NULL) { + if (0 == dumpStableClasuse(taos, dbInfo, + row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { + superTblCnt ++; } - - pThreadInfo->threadIndex = i; - pThreadInfo->tablesOfDumpOut = (itableFrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->tableFrom + - ((threadInfo *)(infos + i - 1))->tablesOfDumpOut; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo); } - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } + taos_free_result(res); - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } + fprintf(g_fpOfResult, + "# super table counter: %"PRId64"\n", + superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; - free(pids); - free(infos); + taos_close(taos); - return 0; + return superTblCnt; } -static int64_t getNtbCountOfStb(char *dbName, char *stbName) -{ - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, - dbName, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return -1; - } +static void dumpCreateDbClause( + SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - int64_t count = 0; + char *pstr = sqlstr; + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); + if (isDumpProperty) { + pstr += sprintf(pstr, + "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", + dbInfo->replica, dbInfo->quorum, dbInfo->days, + dbInfo->keeplist, + dbInfo->cache, + dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, + dbInfo->fsync, + dbInfo->cachelast, + dbInfo->comp, dbInfo->precision, dbInfo->update); + } - char command[COMMAND_SIZE]; + pstr += sprintf(pstr, ";"); + fprintf(fp, "%s\n\n", sqlstr); +} - sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); +static FILE* openDumpInFile(char *fptr) { + wordexp_t full_path; - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; + if (wordexp(fptr, &full_path, 0) != 0) { + errorPrint("illegal file name: %s\n", fptr); + return NULL; } - TAOS_ROW row = NULL; + char *fname = full_path.we_wordv[0]; - if ((row = taos_fetch_row(res)) != NULL) { - count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; + FILE *f = NULL; + if ((fname) && (strlen(fname) > 0)) { + f = fopen(fname, "r"); + if (f == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, fname); + } } - taos_close(taos); - return count; + wordfree(&full_path); + return f; } -static int64_t dumpNtbOfStbByThreads( - SDbInfo *dbInfo, char *stbName) +static uint64_t getFilesNum(char *ext) { - int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); + uint64_t count = 0; - if (ntbCount <= 0) { - return 0; - } - - int threads = g_args.thread_num; - - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } + int namelen, extlen; + struct dirent *pDirent; + DIR *pDir; - assert(threads); - int64_t b = ntbCount % threads; + extlen = strlen(ext); - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - assert(pids); - assert(infos); + bool isSql = (0 == strcmp(ext, "sql")); - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); + pDir = opendir(g_args.inpath); + if (pDir != NULL) { + while ((pDirent = readdir(pDir)) != NULL) { + namelen = strlen (pDirent->d_name); - return -1; + if (namelen > extlen) { + if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { + if (isSql) { + if (0 == strcmp(pDirent->d_name, "dbs.sql")) { + continue; + } + } + verbosePrint("%s found\n", pDirent->d_name); + count ++; + } + } } - - pThreadInfo->threadIndex = i; - pThreadInfo->tablesOfDumpOut = (itableFrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->tableFrom + - ((threadInfo *)(infos + i - 1))->tablesOfDumpOut; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - strcpy(pThreadInfo->stbName, stbName); - pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo); - } - - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); + closedir (pDir); } - int64_t records = 0; - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - records += pThreadInfo->rowsOfDumpOut; - taos_close(pThreadInfo->taos); - } - - free(pids); - free(infos); - - return records; + debugPrint("%"PRId64" .%s files found!\n", count, ext); + return count; } -static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp) +static void freeFileList(char **fileList, int64_t count) { - uint64_t sizeOfTableDes = - (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes); - if (NULL == tableDes) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, sizeOfTableDes); - exit(-1); - } - - int colCount = getTableDes(dbInfo->name, - stbName, tableDes, true); - - if (colCount < 0) { - free(tableDes); - errorPrint("%s() LN%d, failed to get 
stable[%s] schema\n", - __func__, __LINE__, stbName); - exit(-1); + for (int64_t i = 0; i < count; i++) { + tfree(fileList[i]); } - - dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name); - free(tableDes); - - return 0; + tfree(fileList); } -static int64_t dumpCreateSTableClauseOfDb( - SDbInfo *dbInfo, FILE *fp) +static void createDumpinList(char *ext, int64_t count) { - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; - } - - TAOS_ROW row; - char command[COMMAND_SIZE] = {0}; - - sprintf(command, "SHOW %s.STABLES", dbInfo->name); + bool isSql = (0 == strcmp(ext, "sql")); - TAOS_RES* res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - exit(-1); - } + if (isSql) { + g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *)); + assert(g_tsDumpInSqlFiles); - int64_t superTblCnt = 0; - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { - superTblCnt ++; + for (int64_t i = 0; i < count; i++) { + g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN); + assert(g_tsDumpInSqlFiles[i]); } } +#ifdef AVRO_SUPPORT + else { + g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *)); + assert(g_tsDumpInAvroFiles); - taos_free_result(res); + for (int64_t i = 0; i < count; i++) { + g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN); + assert(g_tsDumpInAvroFiles[i]); + } - fprintf(g_fpOfResult, - "# super table counter: %"PRId64"\n", - superTblCnt); - g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + } +#endif - taos_close(taos); + int namelen, extlen; + struct dirent *pDirent; + DIR *pDir; + + extlen = strlen(ext); + + count = 0; + pDir = opendir(g_args.inpath); + if (pDir != NULL) { + while ((pDirent = readdir(pDir)) != NULL) { + namelen = strlen (pDirent->d_name); + + if (namelen > extlen) { + if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { + verbosePrint("%s found\n", pDirent->d_name); + if (isSql) { + if (0 == strcmp(pDirent->d_name, "dbs.sql")) { + continue; + } + strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); + } +#ifdef AVRO_SUPPORT + else { + strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); + } +#endif + } + } + } + closedir (pDir); + } - return superTblCnt; + debugPrint("%"PRId64" .%s files filled to list!\n", count, ext); } -static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) -{ - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; - } +#ifdef AVRO_SUPPORT - char command[COMMAND_SIZE]; - TAOS_RES *result; - int32_t code; +static int convertTbDesToJson( + char *dbName, char *tbName, TableDef *tableDes, int colCount, + char **jsonSchema) +{ + // { + // "type": "record", + // "name": "dbname.tbname", + // "fields": [ + // { + // "name": "col0 name", + // "type": "long" + // }, + // { + // "name": "col1 name", + // "type": "int" + // }, + // { + // "name": "col2 name", + // "type": "float" + // }, + // { + // "name": "col3 name", + // "type": "boolean" + // 
}, + // ... + // { + // "name": "coln name", + // "type": "string" + // } + // ] + // } + *jsonSchema = (char *)calloc(1, + 17 + TSDB_DB_NAME_LEN /* dbname section */ + + 17 /* type: record */ + + 11 + TSDB_TABLE_NAME_LEN /* tbname section */ + + 10 /* fields section */ + + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */ + if (*jsonSchema == NULL) { + errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + return -1; + } - sprintf(command, "USE %s", dbInfo->name); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("invalid database %s, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; + char *pstr = *jsonSchema; + pstr += sprintf(pstr, + "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [", + dbName, tbName); + for (int i = 0; i < colCount; i ++) { + if (0 == i) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else { + if (strcasecmp(tableDes->cols[i].type, "binary") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "string"); + } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "bytes"); + } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "boolean"); + } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "int"); + } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "int"); + } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, + strtolower(tableDes->cols[i].type, tableDes->cols[i].type)); + } + } + if ((i != (colCount -1)) + && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) { + pstr += sprintf(pstr, "},"); + } else { + pstr += sprintf(pstr, "}"); + break; + } } - sprintf(command, "SHOW TABLES"); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("Failed to show %s\'s tables, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; + pstr += sprintf(pstr, "]}"); + + debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema); + + return 0; +} + +static void print_json_indent(int indent) { + int i; + for (i = 0; i < indent; i++) { + putchar(' '); } +} - g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); +const char *json_plural(size_t count) { return count == 1 ? 
"" : "s"; } - TAOS_ROW row; - int64_t count = 0; - while(NULL != (row = taos_fetch_row(result))) { - debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", - __func__, __LINE__, - count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - tstrncpy(((TableInfo *)(g_tablesList + count))->name, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN); - char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; - if (stbName) { - tstrncpy(((TableInfo *)(g_tablesList + count))->stable, - (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); - ((TableInfo *)(g_tablesList + count))->belongStb = true; - } - count ++; +static void print_json_object(json_t *element, int indent) { + size_t size; + const char *key; + json_t *value; + + print_json_indent(indent); + size = json_object_size(element); + + printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size)); + json_object_foreach(element, key, value) { + print_json_indent(indent + 2); + printf("JSON Key: \"%s\"\n", key); + print_json_aux(value, indent + 2); } - taos_close(taos); +} - int64_t records = dumpNtbOfDbByThreads(dbInfo, count); +static void print_json_array(json_t *element, int indent) { + size_t i; + size_t size = json_array_size(element); + print_json_indent(indent); - free(g_tablesList); - g_tablesList = NULL; + printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size)); + for (i = 0; i < size; i++) { + print_json_aux(json_array_get(element, i), indent + 2); + } +} - return records; +static void print_json_string(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON String: \"%s\"\n", json_string_value(element)); } -static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp) -{ - dumpCreateDbClause(dbInfo, g_args.with_property, fp); +static void print_json_integer(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element)); +} - fprintf(g_fpOfResult, "\n#### database: %s\n", - dbInfo->name); - g_resultStatistics.totalDatabasesOfDumpOut++; +static void print_json_real(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Real: %f\n", json_real_value(element)); +} - dumpCreateSTableClauseOfDb(dbInfo, fp); +static void print_json_true(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON True\n"); +} - return dumpNTablesOfDb(dbInfo); +static void print_json_false(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON False\n"); } -static int dumpOut() { - TAOS *taos = NULL; - TAOS_RES *result = NULL; +static void print_json_null(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON Null\n"); +} - TAOS_ROW row; - FILE *fp = NULL; - int32_t count = 0; +static void print_json_aux(json_t *element, int indent) +{ + switch(json_typeof(element)) { + case JSON_OBJECT: + print_json_object(element, indent); + break; - char tmpBuf[4096] = {0}; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); - } else { - sprintf(tmpBuf, "dbs.sql"); - } + case JSON_ARRAY: + print_json_array(element, indent); + break; - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; + case JSON_STRING: + print_json_string(element, indent); + break; + + case JSON_INTEGER: + print_json_integer(element, indent); + break; + + case JSON_REAL: + print_json_real(element, indent); 
+ break; + + case JSON_TRUE: + print_json_true(element, indent); + break; + + case JSON_FALSE: + print_json_false(element, indent); + break; + + case JSON_NULL: + print_json_null(element, indent); + break; + + default: + fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element)); } +} - g_args.dumpDbCount = getDumpDbCount(); - debugPrint("%s() LN%d, dump db count: %d\n", - __func__, __LINE__, g_args.dumpDbCount); +static void print_json(json_t *root) { print_json_aux(root, 0); } - if (0 == g_args.dumpDbCount) { - errorPrint("%d databases valid to dump\n", g_args.dumpDbCount); - fclose(fp); - return -1; +static json_t *load_json(char *jsonbuf) +{ + json_t *root; + json_error_t error; + + root = json_loads(jsonbuf, 0, &error); + + if (root) { + return root; + } else { + fprintf(stderr, "json error on line %d: %s\n", error.line, error.text); + return NULL; } +} - g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *)); - if (g_dbInfos == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", +static RecordSchema *parse_json_to_recordschema(json_t *element) +{ + RecordSchema *recordSchema = malloc(sizeof(RecordSchema)); + assert(recordSchema); + + if (JSON_OBJECT != json_typeof(element)) { + fprintf(stderr, "%s() LN%d, json passed is not an object\n", __func__, __LINE__); - goto _exit_failure; + return NULL; } - char command[COMMAND_SIZE]; + const char *key; + json_t *value; + + json_object_foreach(element, key, value) { + if (0 == strcmp(key, "name")) { + tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1); + } else if (0 == strcmp(key, "fields")) { + if (JSON_ARRAY == json_typeof(value)) { + + size_t i; + size_t size = json_array_size(value); + + verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n", + __func__, __LINE__, + (long long)size, json_plural(size)); + + recordSchema->num_fields = size; + recordSchema->fields = malloc(sizeof(FieldStruct) * size); + assert(recordSchema->fields); + + for (i = 0; i < size; i++) { + FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i); + json_t *arr_element = json_array_get(value, i); + const char *ele_key; + json_t *ele_value; + + json_object_foreach(arr_element, ele_key, ele_value) { + if (0 == strcmp(ele_key, "name")) { + tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1); + } else if (0 == strcmp(ele_key, "type")) { + if (JSON_STRING == json_typeof(ele_value)) { + tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1); + } else if (JSON_OBJECT == json_typeof(ele_value)) { + const char *obj_key; + json_t *obj_value; + + json_object_foreach(ele_value, obj_key, obj_value) { + if (0 == strcmp(obj_key, "type")) { + if (JSON_STRING == json_typeof(obj_value)) { + tstrncpy(field->type, + json_string_value(obj_value), TYPE_NAME_LEN-1); + } + } + } + } + } + } + } + } else { + fprintf(stderr, "%s() LN%d, fields have no array\n", + __func__, __LINE__); + return NULL; + } - /* Connect to server */ - taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - goto _exit_failure; + break; + } } - /* --------------------------------- Main Code -------------------------------- */ - /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */ - /* */ - dumpCharset(fp); + return recordSchema; +} - sprintf(command, "show databases"); - result = taos_query(taos, command); - int32_t code = 
taos_errno(result); +static void freeRecordSchema(RecordSchema *recordSchema) +{ + if (recordSchema) { + if (recordSchema->fields) { + free(recordSchema->fields); + } + free(recordSchema); + } +} - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - goto _exit_failure; +static int64_t writeResultToAvro( + char *avroFilename, + char *jsonSchema, + TAOS_RES *res) +{ + avro_schema_t schema; + if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) { + errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n", + __func__, __LINE__, jsonSchema, avro_strerror()); + exit(EXIT_FAILURE); } - TAOS_FIELD *fields = taos_fetch_fields(result); + json_t *json_root = load_json(jsonSchema); + debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); - while ((row = taos_fetch_row(result)) != NULL) { - // sys database name : 'log', but subsequent version changed to 'log' - if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - && (!g_args.allow_sys)) { - continue; + RecordSchema *recordSchema; + if (json_root) { + if (g_args.debug_print || g_args.verbose_print) { + print_json(json_root); } - if (g_args.databases) { // input multi dbs - if (inDatabasesSeq( - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { - continue; - } - } else if (!g_args.all_databases) { // only input one db - if (strncasecmp(g_args.arg_list[0], - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) - continue; + recordSchema = parse_json_to_recordschema(json_root); + if (NULL == recordSchema) { + fprintf(stderr, "Failed to parse json to recordschema\n"); + exit(EXIT_FAILURE); } - g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (g_dbInfos[count] == NULL) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); - goto _exit_failure; - } + json_decref(json_root); + } else { + errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema); + exit(EXIT_FAILURE); + } - okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); - tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - min(TSDB_DB_NAME_LEN, - fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1)); - if (g_args.with_property) { - g_dbInfos[count]->ntables = - *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - g_dbInfos[count]->vgroups = - *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - g_dbInfos[count]->replica = - *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - g_dbInfos[count]->quorum = - *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - g_dbInfos[count]->days = - *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(g_dbInfos[count]->keeplist, - (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1)); - //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - //g_dbInfos[count]->daysToKeep1; - //g_dbInfos[count]->daysToKeep2; - g_dbInfos[count]->cache = - *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - g_dbInfos[count]->blocks = - *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - g_dbInfos[count]->minrows = - *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - g_dbInfos[count]->maxrows = - *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - g_dbInfos[count]->wallevel = - *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - g_dbInfos[count]->fsync = - *((int32_t 
*)row[TSDB_SHOW_DB_FSYNC_INDEX]); - g_dbInfos[count]->comp = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - g_dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(g_dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - DB_PRECISION_LEN); - g_dbInfos[count]->update = - *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - } - count++; + avro_file_writer_t db; - if (g_args.databases) { - if (count > g_args.dumpDbCount) - break; - } else if (!g_args.all_databases) { - if (count >= 1) - break; - } + int rval = avro_file_writer_create_with_codec + (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0); + if (rval) { + errorPrint("There was an error creating %s. reason: %s\n", + avroFilename, avro_strerror()); + exit(EXIT_FAILURE); } - if (count == 0) { - errorPrint("%d databases valid to dump\n", count); - goto _exit_failure; - } + TAOS_ROW row = NULL; - taos_close(taos); + int numFields = taos_field_count(res); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(res); - if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases - for (int i = 0; i < count; i++) { - int64_t records = 0; - records = dumpWholeDatabase(g_dbInfos[i], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[i]->name); - g_totalDumpOutRows += records; - } - } - } else { - if (1 == g_args.arg_list_len) { - int64_t records = dumpWholeDatabase(g_dbInfos[0], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[0]->name); - g_totalDumpOutRows += records; - } - } else { - dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp); - } + avro_value_iface_t *wface = + avro_generic_class_from_schema(schema); - int superTblCnt = 0 ; - for (int i = 1; g_args.arg_list[i]; i++) { - TableRecordInfo tableRecordInfo; + avro_value_t record; + avro_generic_value_new(wface, &record); - if (getTableRecordInfo(g_dbInfos[0]->name, - g_args.arg_list[i], - &tableRecordInfo) < 0) { - errorPrint("input the invalid table %s\n", - g_args.arg_list[i]); + int64_t count = 0; + while ((row = taos_fetch_row(res)) != NULL) { + avro_value_t value; + + for (int col = 0; col < numFields; col++) { + if (0 != avro_value_get_by_name( + &record, fields[col].name, &value, NULL)) { + errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) 
failed", + __func__, __LINE__, fields[col].name); continue; } - int64_t records = 0; - if (tableRecordInfo.isStb) { // dump all table of this stable - int ret = dumpStableClasuse( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - if (ret >= 0) { - superTblCnt++; - records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]); - } - } else if (tableRecordInfo.belongStb){ - dumpStableClasuse( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - records = dumpNormalTableBelongStb( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - g_args.arg_list[i]); - } else { - records = dumpNormalTableWithoutStb(g_dbInfos[0], g_args.arg_list[i]); - } + int len; + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_BOOL_NULL); + } else { + avro_value_set_boolean(&value, + ((((int32_t)(*((char *)row[col])))==1)?1:0)); + } + break; - if (records >= 0) { - okPrint("table: %s dumped\n", g_args.arg_list[i]); - g_totalDumpOutRows += records; + case TSDB_DATA_TYPE_TINYINT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL); + } else { + avro_value_set_int(&value, *((int8_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL); + } else { + avro_value_set_int(&value, *((int16_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_INT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_INT_NULL); + } else { + avro_value_set_int(&value, *((int32_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_BIGINT: + if (NULL == row[col]) { + avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); + } else { + avro_value_set_long(&value, *((int64_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (NULL == row[col]) { + avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL); + } else { + avro_value_set_float(&value, GET_FLOAT_VAL(row[col])); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (NULL == row[col]) { + avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL); + } else { + avro_value_set_double(&value, GET_DOUBLE_VAL(row[col])); + } + break; + + case TSDB_DATA_TYPE_BINARY: + if (NULL == row[col]) { + avro_value_set_string(&value, + (char *)NULL); + } else { + avro_value_set_string(&value, (char *)row[col]); + } + break; + + case TSDB_DATA_TYPE_NCHAR: + if (NULL == row[col]) { + avro_value_set_bytes(&value, + (void*)NULL,0); + } else { + len = strlen((char*)row[col]); + avro_value_set_bytes(&value, (void*)(row[col]),len); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + if (NULL == row[col]) { + avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); + } else { + avro_value_set_long(&value, *((int64_t *)row[col])); + } + break; + + default: + break; } } + + if (0 != avro_file_writer_append_value(db, &record)) { + errorPrint("%s() LN%d, Unable to write record to file. 
Message: %s\n", + __func__, __LINE__, + avro_strerror()); + } else { + count ++; + } } - /* Close the handle and return */ - fclose(fp); - taos_free_result(result); - freeDbInfos(); - fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return 0; + avro_value_decref(&record); + avro_value_iface_decref(wface); + freeRecordSchema(recordSchema); + avro_file_writer_close(db); + avro_schema_decref(schema); -_exit_failure: - fclose(fp); - taos_close(taos); - taos_free_result(result); - freeDbInfos(); - errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return -1; + return count; } -static int getTableDes( - char* dbName, char *table, - TableDef *tableDes, bool isSuperTable) { - TAOS_ROW row = NULL; - TAOS_RES* res = NULL; - int colCount = 0; +void freeBindArray(char *bindArray, int onlyCol) +{ + TAOS_BIND *bind; - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbName, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbName); + for (int j = 0; j < onlyCol; j++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j)); + if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type) + && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) { + tfree(bind->buffer); + } + } +} + +static int dumpInOneAvroFile(char* fcharset, + char* encode, char *avroFilepath) +{ + debugPrint("avroFilepath: %s\n", avroFilepath); + + avro_file_reader_t reader; + + if(avro_file_reader(avroFilepath, &reader)) { + fprintf(stderr, "Unable to open avro file %s: %s\n", + avroFilepath, avro_strerror()); return -1; } - char sqlstr[COMMAND_SIZE]; - sprintf(sqlstr, "describe %s.%s;", dbName, table); + int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4; + char *jsonbuf = calloc(1, buf_len); + assert(jsonbuf); - res = taos_query(taos, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); + avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);; + + avro_schema_t schema; + schema = avro_file_reader_get_writer_schema(reader); + avro_schema_to_json(schema, jsonwriter); + + if (0 == strlen(jsonbuf)) { + errorPrint("Failed to parse avro file: %s schema. 
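
dumpInOneAvroFile() below inverts that flow: it reopens the file, recovers the schema the writer embedded in the file header, renders it back to JSON for parsing, and iterates the records. A compact sketch of that read path (file and field names hypothetical):

    #include <avro.h>
    #include <stdio.h>

    int main(void) {
        avro_file_reader_t reader;
        if (avro_file_reader("demo.avro", &reader))
            return 1;

        /* the writer schema travels inside the .avro file itself */
        avro_schema_t schema = avro_file_reader_get_writer_schema(reader);

        char jsonbuf[4096] = {0};
        avro_writer_t w = avro_writer_memory(jsonbuf, sizeof(jsonbuf));
        avro_schema_to_json(schema, w);
        printf("schema: %s\n", jsonbuf);

        avro_value_iface_t *iface = avro_generic_class_from_schema(schema);
        avro_value_t record;
        avro_generic_value_new(iface, &record);

        while (0 == avro_file_reader_read_value(reader, &record)) {
            avro_value_t field;
            int64_t ts = 0;
            if (0 == avro_value_get_by_name(&record, "ts", &field, NULL)) {
                avro_value_get_long(&field, &ts);
                printf("%lld\n", (long long)ts);
            }
        }

        avro_value_decref(&record);
        avro_value_iface_decref(iface);
        avro_writer_free(w);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        return 0;
    }
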
reason: %s\n", + avroFilepath, avro_strerror()); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); return -1; } + debugPrint("Schema:\n %s\n", jsonbuf); - TAOS_FIELD *fields = taos_fetch_fields(res); + json_t *json_root = load_json(jsonbuf); + debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); + if (g_args.debug_print) { + print_json(json_root); + } - tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while ((row = taos_fetch_row(res)) != NULL) { - tstrncpy(tableDes->cols[colCount].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - min(TSDB_COL_NAME_LEN + 1, - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); - tstrncpy(tableDes->cols[colCount].type, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1)); - tableDes->cols[colCount].length = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(tableDes->cols[colCount].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(COL_NOTE_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); - colCount++; + const char *namespace = avro_schema_namespace((const avro_schema_t)schema); + debugPrint("Namespace: %s\n", namespace); + + TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, + namespace, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + return -1; } - taos_free_result(res); - res = NULL; + TAOS_STMT *stmt = taos_stmt_init(taos); + if (NULL == stmt) { + taos_close(taos); + errorPrint("%s() LN%d, stmt init failed! reason: %s\n", + __func__, __LINE__, taos_errstr(NULL)); + return -1; + } - if (isSuperTable) { - return colCount; + RecordSchema *recordSchema = parse_json_to_recordschema(json_root); + if (NULL == recordSchema) { + errorPrint("Failed to parse json to recordschema. reason: %s\n", + avro_strerror()); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; } + json_decref(json_root); - // if child-table have tag, using select tagName from table to get tagValue - for (int i = 0 ; i < colCount; i++) { - if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; + TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) + + sizeof(ColDes) * TSDB_MAX_COLUMNS); - sprintf(sqlstr, "select %s from %s.%s", - tableDes->cols[i].field, dbName, table); + int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false); - res = taos_query(taos, sqlstr); - code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } + if (allColCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + recordSchema->name); + free(tableDes); + freeRecordSchema(recordSchema); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } - fields = taos_fetch_fields(res); + char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); + assert(stmtBuffer); + char *pstr = stmtBuffer; + pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); - row = taos_fetch_row(res); - if (NULL == row) { - errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } + int onlyCol = 1; // at least timestamp + for (int col = 1; col < allColCount; col++) { + if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue; + pstr += sprintf(pstr, ",?"); + onlyCol ++; + } + pstr += sprintf(pstr, ")"); - if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { - sprintf(tableDes->cols[i].note, "%s", "NUL"); - sprintf(tableDes->cols[i].value, "%s", "NULL"); - taos_free_result(res); - res = NULL; - continue; - } + if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) { + errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n", + taos_stmt_errstr(stmt)); - int32_t* length = taos_fetch_lengths(res); + free(stmtBuffer); + free(tableDes); + freeRecordSchema(recordSchema); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } - //int32_t* length = taos_fetch_lengths(tmpResult); - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[i].value, "%d", - ((((int32_t)(*((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]))) == 1) ? 1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[i].value, "%d", - *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[i].value, "%d", - *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[i].value, "%d", - *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[i].value, "%" PRId64 "", - *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[i].value, "%f", - GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[i].value, "%f", - GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BINARY: - memset(tableDes->cols[i].value, 0, - sizeof(tableDes->cols[i].value)); - int len = strlen((char *)row[0]); - // FIXME for long value - if (len < (COL_VALUEBUF_LEN - 2)) { - converStringToReadable( - (char *)row[0], - length[0], - tableDes->cols[i].value, - len); - } else { - tableDes->cols[i].var_value = calloc(1, len * 2); - if (tableDes->cols[i].var_value == NULL) { - errorPrint("%s() LN%d, memory alalocation failed!\n", - __func__, __LINE__); - taos_free_result(res); - return -1; + if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) { + errorPrint("Failed to execute taos_stmt_set_tbname(%s). 
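
The statement built above has one `?` placeholder per non-tag column; each fetched Avro record is then bound and queued before a single execute flushes the batch. A sketch of that taos_stmt flow under stated assumptions (an already-open `TAOS *taos`, and a hypothetical table `d0(ts TIMESTAMP, v INT)`):

    #include <string.h>
    #include "taos.h"

    static int insert_one_row(TAOS *taos) {
        TAOS_STMT *stmt = taos_stmt_init(taos);
        if (NULL == stmt) return -1;

        if (taos_stmt_prepare(stmt, "INSERT INTO ? VALUES(?,?)", 0)) return -1;
        if (taos_stmt_set_tbname(stmt, "d0")) return -1;

        int64_t ts = 1630000000000LL;   /* epoch milliseconds */
        int32_t v  = 42;

        TAOS_BIND binds[2];
        memset(binds, 0, sizeof(binds));

        binds[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
        binds[0].buffer        = &ts;
        binds[0].buffer_length = sizeof(ts);
        binds[0].length        = &binds[0].buffer_length;

        binds[1].buffer_type   = TSDB_DATA_TYPE_INT;
        binds[1].buffer        = &v;
        binds[1].buffer_length = sizeof(v);
        binds[1].length        = &binds[1].buffer_length;

        if (taos_stmt_bind_param(stmt, binds)) return -1;  /* once per row */
        if (taos_stmt_add_batch(stmt)) return -1;          /* queue the row */
        if (taos_stmt_execute(stmt)) return -1;            /* flush the batch */

        taos_stmt_close(stmt);
        return 0;
    }
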
reason: %s\n", + recordSchema->name, taos_stmt_errstr(stmt)); + + free(stmtBuffer); + free(tableDes); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } + + avro_value_iface_t *value_class = avro_generic_class_from_schema(schema); + avro_value_t value; + avro_generic_value_new(value_class, &value); + + char *bindArray = + malloc(sizeof(TAOS_BIND) * onlyCol); + assert(bindArray); + + int success = 0; + int failed = 0; + while(!avro_file_reader_read_value(reader, &value)) { + memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol); + TAOS_BIND *bind; + + for (int i = 0; i < recordSchema->num_fields; i++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + + avro_value_t field_value; + + FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i); + + bind->is_null = NULL; + int is_null = 1; + if (0 == i) { + int64_t *ts = malloc(sizeof(int64_t)); + assert(ts); + + avro_value_get_by_name(&value, field->name, &field_value, NULL); + avro_value_get_long(&field_value, ts); + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = ts; + bind->length = &bind->buffer_length; + } else if (0 == avro_value_get_by_name( + &value, field->name, &field_value, NULL)) { + + if (0 == strcasecmp(tableDes->cols[i].type, "int")) { + int32_t *n32 = malloc(sizeof(int32_t)); + assert(n32); + + avro_value_get_int(&field_value, n32); + debugPrint("%d | ", *n32); + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = n32; + } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) { + int32_t *n8 = malloc(sizeof(int32_t)); + assert(n8); + + avro_value_get_int(&field_value, n8); + debugPrint("%d | ", *n8); + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = (int8_t *)n8; + } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) { + int32_t *n16 = malloc(sizeof(int32_t)); + assert(n16); + + avro_value_get_int(&field_value, n16); + debugPrint("%d | ", *n16); + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = (int32_t*)n16; + } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) { + int64_t *n64 = malloc(sizeof(int64_t)); + assert(n64); + + avro_value_get_long(&field_value, n64); + debugPrint("%"PRId64" | ", *n64); + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = n64; + } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) { + int64_t *n64 = malloc(sizeof(int64_t)); + assert(n64); + + avro_value_get_long(&field_value, n64); + debugPrint("%"PRId64" | ", *n64); + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = n64; + } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) { + float *f = malloc(sizeof(float)); + assert(f); + + avro_value_get_float(&field_value, f); + if (TSDB_DATA_FLOAT_NULL == *f) { + debugPrint("%s | ", "NULL"); + bind->is_null = &is_null; + } else { + debugPrint("%f | ", *f); + bind->buffer = f; } - converStringToReadable((char *)row[0], - length[0], - (char *)(tableDes->cols[i].var_value), len); + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) { + double *dbl = malloc(sizeof(double)); + assert(dbl); + + avro_value_get_double(&field_value, dbl); + if 
(TSDB_DATA_DOUBLE_NULL == *dbl) {
+                        debugPrint("%s | ", "NULL");
+                        bind->is_null = &is_null;
+                    } else {
+                        debugPrint("%f | ", *dbl);
+                        bind->buffer = dbl;
+                    }
+                    bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
+                    bind->buffer_length = sizeof(double);
+                } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
+                    size_t size;
+
+                    char *buf = NULL;
+                    avro_value_get_string(&field_value, (const char **)&buf, &size);
+                    debugPrint("%s | ", (char *)buf);
+                    bind->buffer_type = TSDB_DATA_TYPE_BINARY;
+                    bind->buffer_length = tableDes->cols[i].length;
+                    bind->buffer = buf;
+                } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
+                    size_t bytessize;
+                    void *bytesbuf = NULL;
+
+                    avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
+                    debugPrint("%s | ", (char*)bytesbuf);
+                    bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
+                    bind->buffer_length = tableDes->cols[i].length;
+                    bind->buffer = bytesbuf;
+                } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
+                    int32_t *bl = malloc(sizeof(int32_t));
+                    assert(bl);
+
+                    avro_value_get_boolean(&field_value, bl);
+                    debugPrint("%s | ", (*bl)?"true":"false");
+                    bind->buffer_type = TSDB_DATA_TYPE_BOOL;
+                    bind->buffer_length = sizeof(int8_t);
+                    bind->buffer = (int8_t*)bl;
                 }
-                    break;
-                case TSDB_DATA_TYPE_NCHAR:
-                    {
-                        memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
-                        char tbuf[COL_NOTE_LEN-2];   // need reserve 2 bytes for ' '
-                        convertNCharToReadable((char *)row[TSDB_SHOW_TABLES_NAME_INDEX], length[0], tbuf, COL_NOTE_LEN);
-                        sprintf(tableDes->cols[i].value, "%s", tbuf);
-                        break;
-                    }
-                case TSDB_DATA_TYPE_TIMESTAMP:
-                    sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-#if 0
-                    if (!g_args.mysqlFlag) {
-                        sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-                    } else {
-                        char buf[64] = "\0";
-                        int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-                        time_t tt = (time_t)(ts / 1000);
-                        struct tm *ptm = localtime(&tt);
-                        strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
-                        sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
-                    }
-#endif
-                    break;
-                default:
-                    break;
+                bind->length = &bind->buffer_length;
+            }
+        }
+        debugPrint("%s", "\n");
-        taos_free_result(res);
+        if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
+            errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            freeBindArray(bindArray, onlyCol);
+            failed --;
+            continue;
+        }
+        if (0 != taos_stmt_add_batch(stmt)) {
+            errorPrint("%s() LN%d stmt_add_batch() failed! reason: %s\n",
+                    __func__, __LINE__, taos_stmt_errstr(stmt));
+            freeBindArray(bindArray, onlyCol);
+            failed --;
+            continue;
+        }
+
+        freeBindArray(bindArray, onlyCol);
+
+        success ++;
+        continue;
+    }
+
+    if (0 != taos_stmt_execute(stmt)) {
+        errorPrint("%s() LN%d stmt_execute() failed! reason: %s\n",
+                __func__, __LINE__, taos_stmt_errstr(stmt));
+        failed = success;
+    }
+    avro_value_decref(&value);
+    avro_value_iface_decref(value_class);
+
+    tfree(bindArray);
+
+    tfree(stmtBuffer);
+    tfree(tableDes);
+
+    freeRecordSchema(recordSchema);
+    avro_schema_decref(schema);
+    avro_file_reader_close(reader);
+    avro_writer_free(jsonwriter);
+
+    tfree(jsonbuf);
+
+    taos_stmt_close(stmt);
     taos_close(taos);
-    return colCount;
+
+    if (failed < 0)
+        return failed;
+    return success;
 }

-static void dumpCreateDbClause(
-        SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
-    char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+static void* dumpInAvroWorkThreadFp(void *arg)
+{
+    threadInfo *pThread = (threadInfo*)arg;
+    setThreadName("dumpInAvroWorkThrd");
+    verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
+            pThread->threadIndex, pThread->count, pThread->from);
+
+    for (int64_t i = 0; i < pThread->count; i++) {
+        char avroFile[MAX_PATH_LEN];
+        sprintf(avroFile, "%s/%s", g_args.inpath,
+                g_tsDumpInAvroFiles[pThread->from + i]);
+
+        if (0 == dumpInOneAvroFile(g_tsCharset,
+                    g_args.encode,
+                    avroFile)) {
+            okPrint("[%d] Success dump in file: %s\n",
+                    pThread->threadIndex, avroFile);
+        }
+    }
-    char *pstr = sqlstr;
-    pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
-    if (isDumpProperty) {
-        pstr += sprintf(pstr,
-                "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
-                dbInfo->replica, dbInfo->quorum, dbInfo->days,
-                dbInfo->keeplist,
-                dbInfo->cache,
-                dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
-                dbInfo->fsync,
-                dbInfo->cachelast,
-                dbInfo->comp, dbInfo->precision, dbInfo->update);
+    return NULL;
+}
+
+static int64_t dumpInAvroWorkThreads()
+{
+    int64_t ret = 0;
+
+    int32_t threads = g_args.thread_num;
+
+    uint64_t avroFileCount = getFilesNum("avro");
+    if (0 == avroFileCount) {
+        debugPrint("No .avro file found in %s\n", g_args.inpath);
+        return 0;
     }
-    pstr += sprintf(pstr, ";");
-    fprintf(fp, "%s\n\n", sqlstr);
+    createDumpinList("avro", avroFileCount);
+
+    threadInfo *pThread;
+
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    threadInfo *infos = (threadInfo *)calloc(
+            threads, sizeof(threadInfo));
+    assert(pids);
+    assert(infos);
+
+    int64_t a = avroFileCount / threads;
+    if (a < 1) {
+        threads = avroFileCount;
+        a = 1;
+    }
+
+    int64_t b = 0;
+    if (threads != 0) {
+        b = avroFileCount % threads;
+    }
+
+    int64_t from = 0;
+
+    for (int32_t t = 0; t < threads; ++t) {
+        pThread = infos + t;
+        pThread->threadIndex = t;
+
+        pThread->from = from;
+        pThread->count = (t < b) ? (a + 1) : a;
+        from += pThread->count;
+        verbosePrint(
+                "Thread[%d] takes care of %"PRId64" avro files from %"PRId64"\n",
+                t, pThread->count, pThread->from);
+
+        if (pthread_create(pids + t, NULL,
+                    dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
+            errorPrint("%s() LN%d, thread[%d] failed to start\n",
+                    __func__, __LINE__, pThread->threadIndex);
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    for (int t = 0; t < threads; ++t) {
+        pthread_join(pids[t], NULL);
+    }
+
+    free(infos);
+    free(pids);
+
+    freeFileList(g_tsDumpInAvroFiles, avroFileCount);
+
+    return ret;
 }

-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
-        FILE *fp, char* dbName) {
-    int counter = 0;
-    int count_temp = 0;
-    char sqlstr[COMMAND_SIZE];
+#endif /* AVRO_SUPPORT */

-    char* pstr = sqlstr;
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+    int64_t totalRows = 0;

-    pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
-
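
The per-thread partition written above (`count = (t < b) ? a + 1 : a`, with `a = fileCount / threads` and `b = fileCount % threads`) hands the first b threads one extra file each, so every file is covered exactly once. A worked example, 10 files over 4 threads:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        int64_t fileCount = 10, threads = 4;
        int64_t a = fileCount / threads;      /* 2 */
        int64_t b = fileCount % threads;      /* 2 */

        int64_t from = 0;
        for (int64_t t = 0; t < threads; t++) {
            int64_t count = (t < b) ? (a + 1) : a;
            printf("thread %lld: from=%lld count=%lld\n",
                   (long long)t, (long long)from, (long long)count);
            from += count;
        }
        /* counts 3,3,2,2 starting at offsets 0,3,6,8: all 10 files, no overlap */
        return 0;
    }
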
dbName, tableDes->name); + int32_t sql_buf_len = g_args.max_sql_len; + char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); + assert(tmpBuffer); - for (; counter < numOfCols; counter++) { - if (tableDes->cols[counter].note[0] != '\0') break; + char *pstr = tmpBuffer; - if (counter == 0) { - pstr += sprintf(pstr, " (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); + TAOS_ROW row = NULL; + int rowFlag = 0; + int64_t lastRowsPrint = 5000000; + int count = 0; + + int numFields = taos_field_count(res); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(res); + + int32_t curr_sqlstr_len = 0; + int32_t total_sqlstr_len = 0; + + while ((row = taos_fetch_row(res)) != NULL) { + curr_sqlstr_len = 0; + + int32_t* length = taos_fetch_lengths(res); // act len + + if (count == 0) { + total_sqlstr_len = 0; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "INSERT INTO %s.%s VALUES (", dbName, tbName); } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); + if (g_args.mysqlFlag) { + if (0 == rowFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + rowFlag++; + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); + } + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + } } - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); - } + for (int col = 0; col < numFields; col++) { + if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); + + if (row[col] == NULL) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); + continue; + } + + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + ((((int32_t)(*((char *)row[col])))==1)?1:0)); + break; + + case TSDB_DATA_TYPE_TINYINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int8_t *)row[col])); + break; + + case TSDB_DATA_TYPE_SMALLINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int16_t *)row[col])); + break; + + case TSDB_DATA_TYPE_INT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int32_t *)row[col])); + break; + + case TSDB_DATA_TYPE_BIGINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "%" PRId64 "", + *((int64_t *)row[col])); + break; + + case TSDB_DATA_TYPE_FLOAT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", + GET_FLOAT_VAL(row[col])); + break; + + case TSDB_DATA_TYPE_DOUBLE: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", + GET_DOUBLE_VAL(row[col])); + break; + + case TSDB_DATA_TYPE_BINARY: + { + char tbuf[COMMAND_SIZE] = {0}; + converStringToReadable((char *)row[col], length[col], + tbuf, COMMAND_SIZE); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_NCHAR: + { + char tbuf[COMMAND_SIZE] = {0}; + convertNCharToReadable((char *)row[col], length[col], + tbuf, COMMAND_SIZE); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + if (!g_args.mysqlFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "%" PRId64 "", + *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + curr_sqlstr_len += sprintf(pstr + 
curr_sqlstr_len, + "\'%s.%03d\'", + buf, (int)(ts % 1000)); + } + break; + default: + break; + } + } + + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); + + totalRows++; + count++; + fprintf(fp, "%s", tmpBuffer); + + if (totalRows >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from %s.%s\n", + totalRows, dbName, tbName); + lastRowsPrint += 5000000; + } + + total_sqlstr_len += curr_sqlstr_len; + + if ((count >= g_args.data_batch) + || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + fprintf(fp, ";\n"); + count = 0; + } + } + + debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); + + fprintf(fp, "\n"); + free(tmpBuffer); + + return totalRows; +} + +static int64_t dumpTableData(FILE *fp, char *tbName, + char* dbName, int precision, + char *jsonSchema) { + int64_t totalRows = 0; + + char sqlstr[1024] = {0}; + + int64_t start_time, end_time; + if (strlen(g_args.humanStartTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanStartTime, &start_time, + strlen(g_args.humanStartTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", + g_args.humanStartTime); + return -1; + } + } else { + start_time = g_args.start_time; + } + + if (strlen(g_args.humanEndTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanEndTime); + return -1; + } + } else { + end_time = g_args.end_time; + } + + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + dbName, tbName, start_time, end_time); + + TAOS *taos = taos_connect(g_args.host, + g_args.user, g_args.password, dbName, g_args.port); + if (NULL == taos) { + errorPrint( + "Failed to connect to TDengine server %s by specified database %s\n", + g_args.host, dbName); + return -1; + } + + TAOS_RES* res = taos_query(taos, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("failed to run command %s, reason: %s\n", + sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } + +#ifdef AVRO_SUPPORT + if (g_args.avro) { + char avroFilename[MAX_PATH_LEN] = {0}; + + if (g_args.outpath[0] != 0) { + sprintf(avroFilename, "%s/%s.%s.avro", + g_args.outpath, dbName, tbName); + } else { + sprintf(avroFilename, "%s.%s.avro", + dbName, tbName); + } + + totalRows = writeResultToAvro(avroFilename, jsonSchema, res); + } else +#endif + totalRows = writeResultToSql(res, fp, dbName, tbName); + + taos_free_result(res); + taos_close(taos); + return totalRows; +} + +static int64_t dumpNormalTable( + TAOS *taos, + char *dbName, + char *stable, + char *tbName, + int precision, + FILE *fp + ) { + int colCount = 0; + + TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) + + sizeof(ColDes) * TSDB_MAX_COLUMNS); + + if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table + colCount = getTableDes(taos, dbName, tbName, tableDes, false); + + if (colCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + tbName); + free(tableDes); + return -1; + } + + // create child-table using super-table + dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); + } else { // dump table definition + colCount = getTableDes(taos, dbName, tbName, tableDes, false); + + if (colCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + tbName); + free(tableDes); + return 
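
For reference, writeResultToSql() above emits TDengine's multi-row INSERT syntax: the first row of a batch opens `INSERT INTO db.tb VALUES (`, later rows append bare `(...)` tuples, and a `;` is flushed once g_args.data_batch rows are buffered or the buffer comes within TSDB_MAX_BYTES_PER_ROW of its limit. With a hypothetical table and data_batch = 2, the dump file would contain text like:

    INSERT INTO mydb.d0 VALUES (1630000000000, 23.5)(1630000001000, 23.7);
    INSERT INTO mydb.d0 VALUES (1630000002000, 23.9);
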
-1; + } + + // create normal-table or super-table + dumpCreateTableClause(tableDes, colCount, fp, dbName); + } + + char *jsonSchema = NULL; +#ifdef AVRO_SUPPORT + if (g_args.avro) { + if (0 != convertTbDesToJson( + dbName, tbName, tableDes, colCount, &jsonSchema)) { + errorPrint("%s() LN%d, convertTbDesToJson failed\n", + __func__, + __LINE__); + freeTbDes(tableDes); + return -1; + } + } +#endif + + int64_t totalRows = 0; + if (!g_args.schemaonly) { + totalRows = dumpTableData(fp, tbName, dbName, precision, + jsonSchema); + } + + tfree(jsonSchema); + freeTbDes(tableDes); + return totalRows; +} + +static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName) +{ + int64_t count = 0; + + char tmpBuf[MAX_PATH_LEN] = {0}; + FILE *fp = NULL; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.sql", + g_args.outpath, dbInfo->name, ntbName); + } else { + sprintf(tmpBuf, "%s.%s.sql", + dbInfo->name, ntbName); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } + + count = dumpNormalTable( + taos, + dbInfo->name, + NULL, + ntbName, + getPrecisionByString(dbInfo->precision), + fp); + if (count > 0) { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + fclose(fp); + return count; +} + +static int64_t dumpNormalTableBelongStb( + TAOS *taos, + SDbInfo *dbInfo, char *stbName, char *ntbName) +{ + int64_t count = 0; + + char tmpBuf[MAX_PATH_LEN] = {0}; + FILE *fp = NULL; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.sql", + g_args.outpath, dbInfo->name, ntbName); + } else { + sprintf(tmpBuf, "%s.%s.sql", + dbInfo->name, ntbName); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } + + count = dumpNormalTable( + taos, + dbInfo->name, + stbName, + ntbName, + getPrecisionByString(dbInfo->precision), + fp); + if (count > 0) { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + + fclose(fp); + return count; +} + +static void *dumpNtbOfDb(void *arg) { + threadInfo *pThreadInfo = (threadInfo *)arg; + + debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); + debugPrint("dump table count = \t%"PRId64"\n", + pThreadInfo->count); + + FILE *fp = NULL; + char tmpBuf[MAX_PATH_LEN] = {0}; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%d.sql", + g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex); + } else { + sprintf(tmpBuf, "%s.%d.sql", + pThreadInfo->dbName, pThreadInfo->threadIndex); + } + + fp = fopen(tmpBuf, "w"); + + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return NULL; + } + + int64_t count; + for (int64_t i = 0; i < pThreadInfo->count; i++) { + debugPrint("[%d] No.\t%"PRId64" table name: %s\n", + pThreadInfo->threadIndex, i, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name); + count = dumpNormalTable( + pThreadInfo->taos, + pThreadInfo->dbName, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name, + pThreadInfo->precision, + fp); + if (count < 0) { + break; + } else { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + } + + fclose(fp); + return NULL; +} + +static int checkParam() { + if (g_args.all_databases && g_args.databases) { + errorPrint("%s", "conflict option --all-databases and --databases\n"); + return -1; + } + + if (g_args.start_time > g_args.end_time) { + 
errorPrint("%s", "start time is larger than end time\n"); + return -1; + } + + if (g_args.arg_list_len == 0) { + if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) { + errorPrint("%s", "taosdump requires parameters\n"); + return -1; + } + } + /* + if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } + */ + if (!g_args.isDumpIn && g_args.encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + if (g_args.table_batch <= 0) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + return 0; +} + +/* +static bool isEmptyCommand(char *cmd) { + char *pchar = cmd; + + while (*pchar != '\0') { + if (*pchar != ' ') return false; + pchar++; + } + + return true; +} + +static void taosReplaceCtrlChar(char *str) { + bool ctrlOn = false; + char *pstr = NULL; + + for (pstr = str; *str != '\0'; ++str) { + if (ctrlOn) { + switch (*str) { + case 'n': + *pstr = '\n'; + pstr++; + break; + case 'r': + *pstr = '\r'; + pstr++; + break; + case 't': + *pstr = '\t'; + pstr++; + break; + case '\\': + *pstr = '\\'; + pstr++; + break; + case '\'': + *pstr = '\''; + pstr++; + break; + default: + break; + } + ctrlOn = false; + } else { + if (*str == '\\') { + ctrlOn = true; + } else { + *pstr = *str; + pstr++; + } + } + } + + *pstr = '\0'; +} +*/ + +char *ascii_literal_list[] = { + "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", + "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", + "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", + "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", + "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", + "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", + "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", + "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", + "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", + "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", + "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", + "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", + "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", + "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", + "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", + "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", + "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + +static int converStringToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + while 
(size > 0) { + if (*pstr == '\0') break; + pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); + pstr++; + size--; } + *pbuf = '\0'; + return 0; +} - count_temp = counter; - - for (; counter < numOfCols; counter++) { - if (counter == count_temp) { - pstr += sprintf(pstr, ") TAGS (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); +static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + wchar_t wc; + while (size > 0) { + if (*pstr == '\0') break; + int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); + if (byte_width < 0) { + errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__); + exit(-1); } - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + if ((int)wc < 256) { + pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); + } else { + memcpy(pbuf, pstr, byte_width); + pbuf += byte_width; } + pstr += byte_width; } - pstr += sprintf(pstr, ");"); - - debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr); - return fprintf(fp, "%s\n\n", sqlstr); -} + *pbuf = '\0'; -static int writeSchemaToAvro(char *jsonAvroSchema) -{ - errorPrint("%s() LN%d, TODO: implement write schema to avro", - __func__, __LINE__); return 0; } -static int64_t writeResultToAvro(TAOS_RES *res) -{ - errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__); - return 0; +static void dumpCharset(FILE *fp) { + char charsetline[256]; + + (void)fseek(fp, 0, SEEK_SET); + sprintf(charsetline, "#!%s\n", tsCharset); + (void)fwrite(charsetline, strlen(charsetline), 1, fp); } -static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName) -{ - int64_t totalRows = 0; +static void loadFileCharset(FILE *fp, char *fcharset) { + char * line = NULL; + size_t line_size = 0; - int32_t sql_buf_len = g_args.max_sql_len; - char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); - if (tmpBuffer == NULL) { - errorPrint("failed to allocate %d memory\n", sql_buf_len + 128); - return -1; + (void)fseek(fp, 0, SEEK_SET); + ssize_t size = getline(&line, &line_size, fp); + if (size <= 2) { + goto _exit_no_charset; } - char *pstr = tmpBuffer; - - TAOS_ROW row = NULL; - int numFields = 0; - int rowFlag = 0; - int64_t lastRowsPrint = 5000000; - int count = 0; + if (strncmp(line, "#!", 2) != 0) { + goto _exit_no_charset; + } + if (line[size - 1] == '\n') { + line[size - 1] = '\0'; + size--; + } + strcpy(fcharset, line + 2); - numFields = taos_field_count(res); - assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(res); + tfree(line); + return; - int32_t curr_sqlstr_len = 0; - int32_t total_sqlstr_len = 0; +_exit_no_charset: + (void)fseek(fp, 0, SEEK_SET); + *fcharset = '\0'; + tfree(line); + return; +} - while ((row = taos_fetch_row(res)) != NULL) { - curr_sqlstr_len = 0; +// ======== dumpIn support multi threads functions ================================// - int32_t* length = taos_fetch_lengths(res); // act len +static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset, + char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; - if (count == 0) { - total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "INSERT INTO %s.%s VALUES (", dbName, 
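
converStringToReadable() above maps every input byte through ascii_literal_list[], so control bytes, quotes, and backslashes survive a round trip through the SQL text file. A hypothetical call (the function is file-static, so this assumes it were callable directly):

    #include <stdio.h>
    /* assumes converStringToReadable() and ascii_literal_list[] from above */

    int main(void) {
        char buf[64] = {0};
        char raw[] = "a\tb'c";                  /* tab and a single quote */
        converStringToReadable(raw, sizeof(raw) - 1, buf, sizeof(buf));
        printf("%s\n", buf);                    /* prints: a\tb\'c */
        return 0;
    }
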
tbName); - } else { - if (g_args.mysqlFlag) { - if (0 == rowFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - rowFlag++; - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); - } - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - } - } + cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); + if (cmd == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", + __func__, __LINE__); + return -1; + } - for (int col = 0; col < numFields; col++) { - if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); + int lastRowsPrint = 5000000; + int lineNo = 0; + while ((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; + line[--read_len] = '\0'; - if (row[col] == NULL) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); - continue; - } + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } - switch (fields[col].type) { - case TSDB_DATA_TYPE_BOOL: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", - ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col])); - break; - case TSDB_DATA_TYPE_SMALLINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col])); - break; - case TSDB_DATA_TYPE_INT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col])); - break; - case TSDB_DATA_TYPE_BIGINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", - *((int64_t *)row[col])); - break; - case TSDB_DATA_TYPE_FLOAT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col])); - break; - case TSDB_DATA_TYPE_DOUBLE: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); - break; - case TSDB_DATA_TYPE_BINARY: - { - char tbuf[COMMAND_SIZE] = {0}; - converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); - break; - } - case TSDB_DATA_TYPE_NCHAR: - { - char tbuf[COMMAND_SIZE] = {0}; - convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: - if (!g_args.mysqlFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", - *(int64_t *)row[col]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[col]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", - buf, (int)(ts % 1000)); - } - break; - default: - break; - } + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; } - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); + memcpy(cmd + cmd_len, line, read_len); + cmd[read_len + cmd_len]= '\0'; + if (queryDbImpl(taos, cmd)) { + errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", + __func__, __LINE__, lineNo, fileName); + fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); + } - totalRows++; - count++; - fprintf(fp, "%s", tmpBuffer); + memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); + cmd_len = 0; - if (totalRows >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from 
%s.%s\n",
-                totalRows, dbName, tbName);
+        if (lineNo >= lastRowsPrint) {
+            printf(" %d lines already be executed from file %s\n", lineNo, fileName);
             lastRowsPrint += 5000000;
         }
+    }
-        total_sqlstr_len += curr_sqlstr_len;
+    tfree(cmd);
+    tfree(line);
+    return 0;
+}
-        if ((count >= g_args.data_batch)
-                || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
-            fprintf(fp, ";\n");
-            count = 0;
+static void* dumpInSqlWorkThreadFp(void *arg)
+{
+    threadInfo *pThread = (threadInfo*)arg;
+    setThreadName("dumpInSqlWorkThrd");
+    fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
+            pThread->threadIndex, pThread->count, pThread->from);
+
+    for (int64_t i = 0; i < pThread->count; i++) {
+        char sqlFile[MAX_PATH_LEN];
+        sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);
+
+        FILE* fp = openDumpInFile(sqlFile);
+        if (NULL == fp) {
+            errorPrint("[%d] Failed to open input file: %s\n",
+                    pThread->threadIndex, sqlFile);
+            continue;
         }
+
+        if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
+                    sqlFile)) {
+            okPrint("[%d] Success dump in file: %s\n",
+                    pThread->threadIndex, sqlFile);
+        }
+        fclose(fp);
     }
-    debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
+    return NULL;
+}
-    fprintf(fp, "\n");
-    atomic_add_fetch_64(&g_totalDumpOutRows, totalRows);
-    free(tmpBuffer);
+static int dumpInSqlWorkThreads()
+{
+    int32_t threads = g_args.thread_num;
-    return 0;
-}
+    uint64_t sqlFileCount = getFilesNum("sql");
+    if (0 == sqlFileCount) {
+        debugPrint("No .sql file found in %s\n", g_args.inpath);
+        return 0;
+    }
-static int64_t dumpTableData(FILE *fp, char *tbName,
-        char* dbName, int precision,
-        char *jsonAvroSchema) {
-    int64_t totalRows = 0;
+    createDumpinList("sql", sqlFileCount);
+
+    threadInfo *pThread;
+
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    threadInfo *infos = (threadInfo *)calloc(
+            threads, sizeof(threadInfo));
+    assert(pids);
+    assert(infos);
+
+    int64_t a = sqlFileCount / threads;
+    if (a < 1) {
+        threads = sqlFileCount;
+        a = 1;
+    }
+
+    int64_t b = 0;
+    if (threads != 0) {
+        b = sqlFileCount % threads;
     }
-    char sqlstr[1024] = {0};
+    int64_t from = 0;
-    int64_t start_time, end_time;
-    if (strlen(g_args.humanStartTime)) {
-        if (TSDB_CODE_SUCCESS != taosParseTime(
-                g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
-                precision, 0)) {
-            errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
+    for (int32_t t = 0; t < threads; ++t) {
+        pThread = infos + t;
+        pThread->threadIndex = t;
+
+        pThread->from = from;
+        pThread->count = (t < b) ? (a + 1) : a;
+        from += pThread->count;
+        verbosePrint(
+                "Thread[%d] takes care of %"PRId64" sql files from %"PRId64"\n",
+                t, pThread->count, pThread->from);
+
+        pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
+                NULL, g_args.port);
+        if (pThread->taos == NULL) {
+            errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+            free(infos);
+            free(pids);
             return -1;
         }
-    } else {
-        start_time = g_args.start_time;
-    }
-    if (strlen(g_args.humanEndTime)) {
-        if (TSDB_CODE_SUCCESS != taosParseTime(
-                g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
-                precision, 0)) {
-            errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
-            return -1;
+        if (pthread_create(pids + t, NULL,
+                    dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
+            errorPrint("%s() LN%d, thread[%d] failed to start\n",
+                    __func__, __LINE__, pThread->threadIndex);
+            exit(EXIT_FAILURE);
         }
-    } else {
-        end_time = g_args.end_time;
     }
-    sprintf(sqlstr,
-            "select * from %s.%s 
where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", - dbName, tbName, start_time, end_time); - - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbName, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbName); - return -1; + for (int t = 0; t < threads; ++t) { + pthread_join(pids[t], NULL); } - TAOS_RES* res = taos_query(taos, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("failed to run command %s, reason: %s\n", - sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; + for (int t = 0; t < threads; ++t) { + taos_close(infos[t].taos); } + free(infos); + free(pids); - if (g_args.avro) { - writeSchemaToAvro(jsonAvroSchema); - totalRows = writeResultToAvro(res); - } else { - totalRows = writeResultToSql(res, fp, dbName, tbName); - } + freeFileList(g_tsDumpInSqlFiles, sqlFileCount); - taos_free_result(res); - taos_close(taos); - return totalRows; + return 0; } -static int checkParam() { - if (g_args.all_databases && g_args.databases) { - errorPrint("%s", "conflict option --all-databases and --databases\n"); - return -1; - } +static int dumpInDbs() +{ + TAOS *taos = taos_connect( + g_args.host, g_args.user, g_args.password, + NULL, g_args.port); - if (g_args.start_time > g_args.end_time) { - errorPrint("%s", "start time is larger than end time\n"); + if (taos == NULL) { + errorPrint("%s() LN%d, failed to connect to TDengine server\n", + __func__, __LINE__); return -1; } - if (g_args.arg_list_len == 0) { - if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) { - errorPrint("%s", "taosdump requires parameters\n"); - return -1; - } - } - /* - if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) { - fprintf(stderr, "duplicate parameter input and output file path\n"); - return -1; - } - */ - if (!g_args.isDumpIn && g_args.encode != NULL) { - fprintf(stderr, "invalid option in dump out\n"); + char dbsSql[MAX_PATH_LEN]; + sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql"); + + FILE *fp = openDumpInFile(dbsSql); + if (NULL == fp) { + errorPrint("%s() LN%d, failed to open input file %s\n", + __func__, __LINE__, dbsSql); return -1; } + debugPrint("Success Open input file: %s\n", dbsSql); + loadFileCharset(fp, g_tsCharset); - if (g_args.table_batch <= 0) { - fprintf(stderr, "invalid option in dump out\n"); - return -1; + if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) { + okPrint("Success dump in file: %s !\n", dbsSql); } + fclose(fp); + taos_close(taos); + return 0; } -/* -static bool isEmptyCommand(char *cmd) { - char *pchar = cmd; +static int64_t dumpIn() { + assert(g_args.isDumpIn); - while (*pchar != '\0') { - if (*pchar != ' ') return false; - pchar++; - } + int64_t ret = 0; + if (dumpInDbs()) { + errorPrint("%s", "Failed to dump dbs in!\n"); + exit(EXIT_FAILURE); + } - return true; + ret = dumpInSqlWorkThreads(); + +#ifdef AVRO_SUPPORT + if (0 == ret) { + ret = dumpInAvroWorkThreads(); + } +#endif + + return ret; } -static void taosReplaceCtrlChar(char *str) { - bool ctrlOn = false; - char *pstr = NULL; +static void *dumpNormalTablesOfStb(void *arg) { + threadInfo *pThreadInfo = (threadInfo *)arg; - for (pstr = str; *str != '\0'; ++str) { - if (ctrlOn) { - switch (*str) { - case 'n': - *pstr = '\n'; - pstr++; - break; - case 'r': - *pstr = '\r'; - pstr++; - break; - case 't': - *pstr = '\t'; - pstr++; - break; - case '\\': - *pstr = '\\'; - pstr++; - 
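
dumpIn() above replays a dump directory in a fixed order: dbs.sql first (via dumpInDbs, to recreate databases and super tables), then the .sql data files across worker threads, then, when built with AVRO_SUPPORT, the .avro files. Under the naming used by the dump-out side ("%s.%s.sql" / "%s.%s.avro"), an input directory looks roughly like this (database and table names hypothetical):

    dump_dir/
        dbs.sql            # CREATE DATABASE / STable clauses, replayed first
        mydb.d0.sql        # per-table data as batched SQL INSERTs
        mydb.d1.avro       # per-table data as Avro records
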
break; - case '\'': - *pstr = '\''; - pstr++; - break; - default: - break; - } - ctrlOn = false; - } else { - if (*str == '\\') { - ctrlOn = true; - } else { - *pstr = *str; - pstr++; - } + debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); + debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count); + + char command[COMMAND_SIZE]; + + sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", + pThreadInfo->dbName, pThreadInfo->stbName, + pThreadInfo->count, pThreadInfo->from); + + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + int32_t code = taos_errno(res); + if (code) { + errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return NULL; } - } - *pstr = '\0'; -} -*/ + FILE *fp = NULL; + char tmpBuf[MAX_PATH_LEN] = {0}; -char *ascii_literal_list[] = { - "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", - "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", - "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", - "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", - "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", - "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", - "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", - "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", - "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", - "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", - "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", - "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", - "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", - "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", - "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", - "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", - "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.%d.sql", + g_args.outpath, + pThreadInfo->dbName, + pThreadInfo->stbName, + pThreadInfo->threadIndex); + } else { + sprintf(tmpBuf, "%s.%s.%d.sql", + pThreadInfo->dbName, + pThreadInfo->stbName, + pThreadInfo->threadIndex); + } -static int converStringToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - while (size > 0) { - if (*pstr == '\0') break; - pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); - pstr++; - size--; + fp = fopen(tmpBuf, "w"); + + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + 
return NULL;
     }
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
-    char *pstr = str;
-    char *pbuf = buf;
-    wchar_t wc;
-    while (size > 0) {
-        if (*pstr == '\0') break;
-        int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
-        if (byte_width < 0) {
-            errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
-            exit(-1);
-        }
+    TAOS_ROW row = NULL;
+    int64_t i = 0;
+    int64_t count;
+    while((row = taos_fetch_row(res)) != NULL) {
+        debugPrint("[%d] sub table %"PRId64": name: %s\n",
+                pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-        if ((int)wc < 256) {
-            pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+        count = dumpNormalTable(
+                pThreadInfo->taos,
+                pThreadInfo->dbName,
+                pThreadInfo->stbName,
+                (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+                pThreadInfo->precision,
+                fp);
+        if (count < 0) {
+            break;
         } else {
-            memcpy(pbuf, pstr, byte_width);
-            pbuf += byte_width;
+            atomic_add_fetch_64(&g_totalDumpOutRows, count);
         }
-        pstr += byte_width;
     }
-    *pbuf = '\0';
+    fclose(fp);
+    return NULL;
+}
+
+static int64_t dumpNtbOfDbByThreads(
+        SDbInfo *dbInfo,
+        int64_t ntbCount)
+{
+    if (ntbCount <= 0) {
+        return 0;
+    }
+
+    int threads = g_args.thread_num;
-    return 0;
-}
+    int64_t a = ntbCount / threads;
+    if (a < 1) {
+        threads = ntbCount;
+        a = 1;
+    }
-static void dumpCharset(FILE *fp) {
-    char charsetline[256];
+    assert(threads);
+    int64_t b = ntbCount % threads;
-    (void)fseek(fp, 0, SEEK_SET);
-    sprintf(charsetline, "#!%s\n", tsCharset);
-    (void)fwrite(charsetline, strlen(charsetline), 1, fp);
-}
+    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    assert(pids);
+    assert(infos);
-static void loadFileCharset(FILE *fp, char *fcharset) {
-    char * line = NULL;
-    size_t line_size = 0;
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->taos = taos_connect(
+                g_args.host,
+                g_args.user,
+                g_args.password,
+                dbInfo->name,
+                g_args.port
+                );
+        if (NULL == pThreadInfo->taos) {
+            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+                    __func__,
+                    __LINE__,
+                    taos_errstr(NULL));
+            free(pids);
+            free(infos);
-    (void)fseek(fp, 0, SEEK_SET);
-    ssize_t size = getline(&line, &line_size, fp);
-    if (size <= 2) {
-        goto _exit_no_charset;
+            return -1;
+        }
+
+        pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i < b) ? (a + 1) : a;
+        pThreadInfo->from = (i == 0) ? 0 :
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+        strcpy(pThreadInfo->dbName, dbInfo->name);
+        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
+
+        pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
     }
-    if (strncmp(line, "#!", 2) != 0) {
-        goto _exit_no_charset;
+    for (int64_t i = 0; i < threads; i++) {
+        pthread_join(pids[i], NULL);
     }
-    if (line[size - 1] == '\n') {
-        line[size - 1] = '\0';
-        size--;
+
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        taos_close(pThreadInfo->taos);
     }
-    strcpy(fcharset, line + 2);
-    tfree(line);
-    return;
+    free(pids);
+    free(infos);
-_exit_no_charset:
-    (void)fseek(fp, 0, SEEK_SET);
-    *fcharset = '\0';
-    tfree(line);
-    return;
+    return 0;
 }

-// ========  dumpIn  support multi threads functions ================================//
-
-static char    **g_tsDumpInSqlFiles = NULL;
-static int32_t   g_tsSqlFileNum = 0;
-static char      g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
-static char      g_tsCharset[64] = {0};
-
-static int taosGetFilesNum(const char *directoryName,
-
const char *prefix, const char *prefix2) +static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) { - char cmd[1024] = { 0 }; + TAOS *taos = taos_connect(g_args.host, + g_args.user, g_args.password, dbInfo->name, g_args.port); + if (NULL == taos) { + errorPrint( + "Failed to connect to TDengine server %s by specified database %s\n", + g_args.host, dbInfo->name); + return 0; + } - if (prefix2) - sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ", - directoryName, prefix, directoryName, prefix2); - else - sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + char command[COMMAND_SIZE]; + TAOS_RES *result; + int32_t code; - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(-1); + sprintf(command, "USE %s", dbInfo->name); + result = taos_query(taos, command); + code = taos_errno(result); + if (code != 0) { + errorPrint("invalid database %s, reason: %s\n", + dbInfo->name, taos_errstr(result)); + taos_close(taos); + return 0; } - int fileNum = 0; - if (fscanf(fp, "%d", &fileNum) != 1) { - errorPrint("failed to execute:%s, parse result error\n", cmd); - exit(-1); + sprintf(command, "SHOW TABLES"); + result = taos_query(taos, command); + code = taos_errno(result); + if (code != 0) { + errorPrint("Failed to show %s\'s tables, reason: %s\n", + dbInfo->name, taos_errstr(result)); + taos_close(taos); + return 0; } - if (fileNum <= 0) { - errorPrint("directory:%s is empty\n", directoryName); - exit(-1); + g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); + assert(g_tablesList); + + TAOS_ROW row; + int64_t count = 0; + while(NULL != (row = taos_fetch_row(result))) { + debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", + __func__, __LINE__, + count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + tstrncpy(((TableInfo *)(g_tablesList + count))->name, + (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN); + char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; + if (stbName) { + tstrncpy(((TableInfo *)(g_tablesList + count))->stable, + (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); + ((TableInfo *)(g_tablesList + count))->belongStb = true; + } + count ++; } + taos_close(taos); + + int64_t records = dumpNtbOfDbByThreads(dbInfo, count); + + free(g_tablesList); + g_tablesList = NULL; - pclose(fp); - return fileNum; + return records; } -static void taosParseDirectory(const char *directoryName, - const char *prefix, const char *prefix2, - char **fileArray, int totalFiles) +static int64_t dumpNtbOfStbByThreads( + SDbInfo *dbInfo, char *stbName) { - char cmd[1024] = { 0 }; + int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); - if (prefix2) { - sprintf(cmd, "ls %s/*.%s %s/*.%s | sort", - directoryName, prefix, directoryName, prefix2); - } else { - sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + if (ntbCount <= 0) { + return 0; } - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(-1); - } + int threads = g_args.thread_num; - int fileNum = 0; - while (fscanf(fp, "%128s", fileArray[fileNum++])) { - if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) { - fileNum--; - } - if (fileNum >= totalFiles) { - break; - } + int64_t a = ntbCount / threads; + if (a < 1) { + threads = ntbCount; + a = 1; } - if (fileNum != totalFiles) { - errorPrint("directory:%s changed while read\n", directoryName); - pclose(fp); - exit(-1); - } + assert(threads); + int64_t b = ntbCount % threads; - pclose(fp); -} + pthread_t 
*pids = calloc(1, threads * sizeof(pthread_t));
+    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+    assert(pids);
+    assert(infos);
-static void taosCheckDatabasesSQLFile(const char *directoryName)
-{
-    char cmd[1024] = { 0 };
-    sprintf(cmd, "ls %s/dbs.sql", directoryName);
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->taos = taos_connect(
+                g_args.host,
+                g_args.user,
+                g_args.password,
+                dbInfo->name,
+                g_args.port
+                );
+        if (NULL == pThreadInfo->taos) {
+            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+                    __func__,
+                    __LINE__,
+                    taos_errstr(NULL));
+            free(pids);
+            free(infos);
-    FILE *fp = popen(cmd, "r");
-    if (fp == NULL) {
-        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
-        exit(-1);
-    }
+            return -1;
+        }
-    while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
-        break;
-    }
+        pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i<b)?a+1:a;
+        pThreadInfo->from = (i==0)?0:
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+        strcpy(pThreadInfo->dbName, dbInfo->name);
+        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-    pclose(fp);
-}
+        strcpy(pThreadInfo->stbName, stbName);
+        pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
+    }
-static void taosMallocDumpFiles()
-{
-    g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
-    for (int i = 0; i < g_tsSqlFileNum; i++) {
-        g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+    for (int64_t i = 0; i < threads; i++) {
+        pthread_join(pids[i], NULL);
    }
-}
-static void freeDumpFiles()
-{
-    for (int i = 0; i < g_tsSqlFileNum; i++) {
-        tfree(g_tsDumpInSqlFiles[i]);
+    int64_t records = 0;
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        records += pThreadInfo->rowsOfDumpOut;
+        taos_close(pThreadInfo->taos);
    }
-    tfree(g_tsDumpInSqlFiles);
+
+    free(pids);
+    free(infos);
+
+    return records;
}
-static void taosGetDirectoryFileList(char *inputDir)
+static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
{
-    struct stat fileStat;
-    if (stat(inputDir, &fileStat) < 0) {
-        errorPrint("%s not exist\n", inputDir);
-        exit(-1);
-    }
+    dumpCreateDbClause(dbInfo, g_args.with_property, fp);
-    if (fileStat.st_mode & S_IFDIR) {
-        taosCheckDatabasesSQLFile(inputDir);
-        if (g_args.avro)
-            g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
-        else
-            g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+    fprintf(g_fpOfResult, "\n#### database: %s\n",
+            dbInfo->name);
+    g_resultStatistics.totalDatabasesOfDumpOut++;
-        int tsSqlFileNumOfTbls = g_tsSqlFileNum;
-        if (g_tsDbSqlFile[0] != 0) {
-            tsSqlFileNumOfTbls--;
-        }
-        taosMallocDumpFiles();
-        if (0 != tsSqlFileNumOfTbls) {
-            if (g_args.avro) {
-                taosParseDirectory(inputDir, "sql", "avro",
-                        g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
-            } else {
-                taosParseDirectory(inputDir, "sql", NULL,
-                        g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
-            }
-        }
-        fprintf(stdout, "\nstart to dispose %d files in %s\n",
-                g_tsSqlFileNum, inputDir);
-    } else {
-        errorPrint("%s is not a directory\n", inputDir);
-        exit(-1);
-    }
-}
+    dumpCreateSTableClauseOfDb(dbInfo, fp);
-static FILE* taosOpenDumpInFile(char *fptr) {
-    wordexp_t full_path;
+    return dumpNTablesOfDb(dbInfo);
+}
-    if (wordexp(fptr, &full_path, 0) != 0) {
-        errorPrint("illegal file name: %s\n", fptr);
-        return NULL;
-    }
+static int dumpOut() {
+    TAOS *taos = NULL;
+    TAOS_RES *result = NULL;
-    char *fname = full_path.we_wordv[0];
+    TAOS_ROW row;
+    FILE *fp = NULL;
+    int32_t count = 0;
-    FILE *f = NULL;
-    if ((fname) && (strlen(fname) > 0)) {
-        f = fopen(fname, "r");
-        if (f == NULL) {
-            errorPrint("%s() LN%d, failed to open file %s\n",
-                    __func__, __LINE__, fname);
-        }
+    char tmpBuf[MAX_PATH_LEN] = {0};
+    if (g_args.outpath[0] != 0) {
+        sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
+    } else {
+        sprintf(tmpBuf, "dbs.sql");
    }
-    wordfree(&full_path);
-    return f;
-}
+    fp = fopen(tmpBuf, "w");
+    if (fp == NULL) {
+        errorPrint("%s() LN%d, failed to open file %s\n",
+                __func__, __LINE__, tmpBuf);
+        return -1;
+    }
-static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
-        char* encode, char* fileName) {
-    int read_len = 0;
-    char * cmd = NULL;
-    size_t cmd_len = 0;
-    char * line = NULL;
-    size_t line_len = 0;
+    g_args.dumpDbCount = getDumpDbCount();
+    debugPrint("%s() LN%d, dump db count: %d\n",
+            __func__, __LINE__, g_args.dumpDbCount);
-    cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
-    if (cmd == NULL) {
-        errorPrint("%s() LN%d, failed to allocate memory\n",
-                __func__, __LINE__);
+    if (0 == g_args.dumpDbCount) {
+        errorPrint("%d database(s) valid to dump\n", g_args.dumpDbCount);
+        fclose(fp);
        return -1;
    }
-    int lastRowsPrint = 5000000;
-    int lineNo = 0;
-    while ((read_len = getline(&line, &line_len, fp)) != -1) {
-        ++lineNo;
-        if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
-        line[--read_len] = '\0';
+    g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
+    if (g_dbInfos == NULL) {
+        errorPrint("%s() LN%d, failed to allocate memory\n",
+                __func__, __LINE__);
+        goto _exit_failure;
+    }
-        //if (read_len == 0 || isCommentLine(line)) {  // line starts with #
-        if (read_len == 0 ) {
-            continue;
-        }
+    char command[COMMAND_SIZE];
-        if (line[read_len - 1] == '\\') {
-            line[read_len - 1] = ' ';
-            memcpy(cmd + cmd_len, line, read_len);
-            cmd_len += read_len;
-            continue;
-        }
+    /* Connect to server */
+    taos = taos_connect(g_args.host, g_args.user, g_args.password,
+            NULL, g_args.port);
+    if (taos == NULL) {
+        errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+        goto _exit_failure;
+    }
-        memcpy(cmd + cmd_len, line, read_len);
-        cmd[read_len + cmd_len]= '\0';
-        if (queryDbImpl(taos, cmd)) {
-            errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
-                    __func__, __LINE__, lineNo, fileName);
-            fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
-        }
+    /* --------------------------------- Main Code -------------------------------- */
+    /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
+    /* */
+    dumpCharset(fp);
-        memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
-        cmd_len = 0;
+    sprintf(command, "show databases");
+    result = taos_query(taos, command);
+    int32_t code = taos_errno(result);
-        if (lineNo >= lastRowsPrint) {
-            printf(" %d lines already be executed from file %s\n", lineNo, fileName);
-            lastRowsPrint += 5000000;
-        }
+    if (code != 0) {
+        errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+                __func__, __LINE__, command, taos_errstr(result));
+        goto _exit_failure;
    }
-    tfree(cmd);
-    tfree(line);
-    fclose(fp);
-    return 0;
-}
+    TAOS_FIELD *fields = taos_fetch_fields(result);
-static void* dumpInWorkThreadFp(void *arg)
-{
-    threadInfo *pThread = (threadInfo*)arg;
-    setThreadName("dumpInWorkThrd");
+    while ((row = taos_fetch_row(result)) != NULL) {
+        // skip the system database 'log' unless --allow-sys is specified
+        if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+                        fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+                && (!g_args.allow_sys)) {
+            continue;
+        }
-    for (int32_t f =
0; f < g_tsSqlFileNum; ++f) { - if (f % pThread->totalThreads == pThread->threadIndex) { - char *SQLFileName = g_tsDumpInSqlFiles[f]; - FILE* fp = taosOpenDumpInFile(SQLFileName); - if (NULL == fp) { + if (g_args.databases) { // input multi dbs + if (inDatabasesSeq( + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { continue; } - fprintf(stderr, ", Success Open input file: %s\n", - SQLFileName); - dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName); + } else if (!g_args.all_databases) { // only input one db + if (strncasecmp(g_args.arg_list[0], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) + continue; } - } - return NULL; -} - -static void startDumpInWorkThreads() -{ - pthread_attr_t thattr; - threadInfo *pThread; - int32_t totalThreads = g_args.thread_num; - - if (totalThreads > g_tsSqlFileNum) { - totalThreads = g_tsSqlFileNum; - } + g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (g_dbInfos[count] == NULL) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); + goto _exit_failure; + } - threadInfo *threadObj = (threadInfo *)calloc( - totalThreads, sizeof(threadInfo)); + okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); + tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + min(TSDB_DB_NAME_LEN, + fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1)); + if (g_args.with_property) { + g_dbInfos[count]->ntables = + *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + g_dbInfos[count]->vgroups = + *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + g_dbInfos[count]->replica = + *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + g_dbInfos[count]->quorum = + *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + g_dbInfos[count]->days = + *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - if (NULL == threadObj) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - } + tstrncpy(g_dbInfos[count]->keeplist, + (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1)); + //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + //g_dbInfos[count]->daysToKeep1; + //g_dbInfos[count]->daysToKeep2; + g_dbInfos[count]->cache = + *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + g_dbInfos[count]->blocks = + *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + g_dbInfos[count]->minrows = + *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + g_dbInfos[count]->maxrows = + *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + g_dbInfos[count]->wallevel = + *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + g_dbInfos[count]->fsync = + *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + g_dbInfos[count]->comp = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + g_dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - for (int32_t t = 0; t < totalThreads; ++t) { - pThread = threadObj + t; - pThread->threadIndex = t; - pThread->totalThreads = totalThreads; - pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (pThread->taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - free(threadObj); - return; + tstrncpy(g_dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + DB_PRECISION_LEN); + g_dbInfos[count]->update = + *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } - pthread_attr_init(&thattr); - pthread_attr_setdetachstate(&thattr, 
PTHREAD_CREATE_JOINABLE);
+        count++;
-        if (pthread_create(&(pThread->threadID), &thattr,
-                    dumpInWorkThreadFp, (void*)pThread) != 0) {
-            errorPrint("%s() LN%d, thread:%d failed to start\n",
-                    __func__, __LINE__, pThread->threadIndex);
-            exit(0);
+        if (g_args.databases) {
+            if (count > g_args.dumpDbCount)
+                break;
+        } else if (!g_args.all_databases) {
+            if (count >= 1)
+                break;
        }
    }
-    for (int t = 0; t < totalThreads; ++t) {
-        pthread_join(threadObj[t].threadID, NULL);
-    }
-
-    for (int t = 0; t < totalThreads; ++t) {
-        taos_close(threadObj[t].taos);
+    if (count == 0) {
+        errorPrint("%d database(s) valid to dump\n", count);
+        goto _exit_failure;
    }
-    free(threadObj);
-}
-
-static int dumpIn() {
-    assert(g_args.isDumpIn);
-    TAOS *taos = NULL;
-    FILE *fp = NULL;
+    if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ...   OR   taosdump --all-databases
+        for (int i = 0; i < count; i++) {
+            int64_t records = 0;
+            records = dumpWholeDatabase(g_dbInfos[i], fp);
+            if (records >= 0) {
+                okPrint("Database %s dumped\n", g_dbInfos[i]->name);
+                g_totalDumpOutRows += records;
+            }
+        }
+    } else {
+        if (1 == g_args.arg_list_len) {
+            int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
+            if (records >= 0) {
+                okPrint("Database %s dumped\n", g_dbInfos[0]->name);
+                g_totalDumpOutRows += records;
+            }
+        } else {
+            dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+        }
-    taos = taos_connect(
-            g_args.host, g_args.user, g_args.password,
-            NULL, g_args.port);
-    if (taos == NULL) {
-        errorPrint("%s() LN%d, failed to connect to TDengine server\n",
-                __func__, __LINE__);
-        return -1;
-    }
+        int superTblCnt = 0;
+        for (int i = 1; g_args.arg_list[i]; i++) {
+            TableRecordInfo tableRecordInfo;
-    taosGetDirectoryFileList(g_args.inpath);
+            if (getTableRecordInfo(g_dbInfos[0]->name,
+                        g_args.arg_list[i],
+                        &tableRecordInfo) < 0) {
+                errorPrint("invalid table %s specified\n",
+                        g_args.arg_list[i]);
+                continue;
+            }
-    int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
-    if (g_tsDbSqlFile[0] != 0) {
-        tsSqlFileNumOfTbls--;
+            int64_t records = 0;
+            if (tableRecordInfo.isStb) { // dump all tables of this stable
+                int ret = dumpStableClasuse(
+                        taos,
+                        g_dbInfos[0],
+                        tableRecordInfo.tableRecord.stable,
+                        fp);
+                if (ret >= 0) {
+                    superTblCnt++;
+                    records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
+                }
+            } else if (tableRecordInfo.belongStb) {
+                dumpStableClasuse(
+                        taos,
+                        g_dbInfos[0],
+                        tableRecordInfo.tableRecord.stable,
+                        fp);
+                records = dumpNormalTableBelongStb(
+                        taos,
+                        g_dbInfos[0],
+                        tableRecordInfo.tableRecord.stable,
+                        g_args.arg_list[i]);
+            } else {
+                records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
+            }
-    fp = taosOpenDumpInFile(g_tsDbSqlFile);
-    if (NULL == fp) {
-        errorPrint("%s() LN%d, failed to open input file %s\n",
-                __func__, __LINE__, g_tsDbSqlFile);
-        return -1;
+            if (records >= 0) {
+                okPrint("table: %s dumped\n", g_args.arg_list[i]);
+                g_totalDumpOutRows += records;
+            }
        }
-        fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
-
-        loadFileCharset(fp, g_tsCharset);
-
-        dumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
-                g_tsDbSqlFile);
    }
    taos_close(taos);
-    if (0 != tsSqlFileNumOfTbls) {
-        startDumpInWorkThreads();
-    }
-
-    freeDumpFiles();
+    /* Close the handle and return */
+    fclose(fp);
+    taos_free_result(result);
+    freeDbInfos();
+    fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
    return 0;
+
+_exit_failure:
+    fclose(fp);
+    taos_close(taos);
+    taos_free_result(result);
+    freeDbInfos();
+    errorPrint("dump 
out rows: %" PRId64 "\n", g_totalDumpOutRows); + return -1; } int main(int argc, char *argv[]) { @@ -2988,7 +3910,10 @@ int main(int argc, char *argv[]) { printf("databasesSeq: %s\n", g_args.databasesSeq); printf("schemaonly: %s\n", g_args.schemaonly?"true":"false"); printf("with_property: %s\n", g_args.with_property?"true":"false"); +#ifdef AVRO_SUPPORT printf("avro format: %s\n", g_args.avro?"true":"false"); + printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]); +#endif printf("start_time: %" PRId64 "\n", g_args.start_time); printf("human readable start time: %s \n", g_args.humanStartTime); printf("end_time: %" PRId64 "\n", g_args.end_time); @@ -3042,7 +3967,10 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq); fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false"); fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); +#ifdef AVRO_SUPPORT fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); + fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]); +#endif fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); @@ -3072,6 +4000,7 @@ int main(int argc, char *argv[]) { tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); if (dumpIn() < 0) { + errorPrint("%s\n", "dumpIn() failed!"); ret = -1; } } else { @@ -3103,4 +4032,3 @@ int main(int argc, char *argv[]) { return ret; } - diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 4cf444bab2f05816c1af55d96156334800d758d5..66d15e48ed13e1dce9a38bd2db65e9e610209e50 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,26 +1,6 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) -if(NOT WIN32) - string(ASCII 27 Esc) - set(ColourReset "${Esc}[m") - set(ColourBold "${Esc}[1m") - set(Red "${Esc}[31m") - set(Green "${Esc}[32m") - set(Yellow "${Esc}[33m") - set(Blue "${Esc}[34m") - set(Magenta "${Esc}[35m") - set(Cyan "${Esc}[36m") - set(White "${Esc}[37m") - set(BoldRed "${Esc}[1;31m") - set(BoldGreen "${Esc}[1;32m") - set(BoldYellow "${Esc}[1;33m") - set(BoldBlue "${Esc}[1;34m") - set(BoldMagenta "${Esc}[1;35m") - set(BoldCyan "${Esc}[1;36m") - set(BoldWhite "${Esc}[1;37m") -endif() - ADD_SUBDIRECTORY(monitor) IF (TD_BUILD_HTTP) @@ -57,8 +37,15 @@ ELSE () DEPENDS taos BUILD_IN_SOURCE 1 CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}" - INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/ + INSTALL_COMMAND + COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 && ./upx blm3 + COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./blm3.service ${CMAKE_BINARY_DIR}/test/cfg/ ) ENDIF () diff --git 
a/src/plugins/blm3 b/src/plugins/blm3 index ba539ce69dc4fe53536e9b0517fe75917dce5c46..598cb96ee60ec6a16c5b8b07ea8ca9748799e7e1 160000 --- a/src/plugins/blm3 +++ b/src/plugins/blm3 @@ -1 +1 @@ -Subproject commit ba539ce69dc4fe53536e9b0517fe75917dce5c46 +Subproject commit 598cb96ee60ec6a16c5b8b07ea8ca9748799e7e1 diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index ccbcc985118b132369a1ee3895f4341e6cca6d59..f26a4b4c8bdda05f801075b70c1b762882adfd27 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext; HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext, - sizeof(TSDB_CACHE_PTR_TYPE), 3000); + sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 649086c892547a4cda7cb49ad7c9b52113fd1b97..d7a1591c6c6ee8ef0ff2d14841c621198bc6357f 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -402,7 +402,7 @@ typedef struct SQInfo { int32_t dataReady; // denote if query result is ready or not void* rspContext; // response context int64_t startExecTs; // start to exec timestamp - int64_t lastRetrieveTs; // last retrieve timestamp + int64_t lastRetrieveTs; // last retrieve timestamp char* sql; // query sql string SQueryCostInfo summary; } SQInfo; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1034f3c3152ddf4d539f46d539b89f38a3543ee9..2fa6cfdb5beac676811be4c36fc3e3ff5ff00cb5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4188,31 +4188,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data compSizes = tcalloc(numOfCols, sizeof(int32_t)); } - if (pQueryAttr->pExpr2 == NULL) { - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - if (compressed) { - compSizes[col] = compressQueryColData(pColRes, pRes->info.rows, data, compressed); - data += compSizes[col]; - *compLen += compSizes[col]; - compSizes[col] = htonl(compSizes[col]); - } else { - memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows); - data += pColRes->info.bytes * pRes->info.rows; - } - } - } else { - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - if (compressed) { - compSizes[col] = htonl(compressQueryColData(pColRes, numOfRows, data, compressed)); - data += compSizes[col]; - *compLen += compSizes[col]; - compSizes[col] = htonl(compSizes[col]); - } else { - memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows); - data += pColRes->info.bytes * numOfRows; - } + for (int32_t col = 0; col < numOfCols; ++col) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); + if (compressed) { + compSizes[col] = compressQueryColData(pColRes, numOfRows, data, compressed); + data += compSizes[col]; + *compLen += compSizes[col]; + compSizes[col] = htonl(compSizes[col]); + } else { + memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows); + data += pColRes->info.bytes * numOfRows; } } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 258a29b90b40f4a5a630c17328a927923e1f1be6..c52fbf208f6fbf0384ecf66650919c4d12ae352e 100644 --- 
a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 128 +#define TSDB_CFG_MAX_NUM 130 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml index 34518900ed30f48effd47a8786233080f3e5291f..81c549274c81ddc69d52508c46cd215edd8c5467 100644 --- a/tests/examples/JDBC/connectionPools/pom.xml +++ b/tests/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.34 diff --git a/tests/examples/JDBC/readme.md b/tests/examples/JDBC/readme.md index 9a017f4feab148cb7c3fd4132360c3075c6573cb..35dfb341d7d62bb283897523f928e04dabea962d 100644 --- a/tests/examples/JDBC/readme.md +++ b/tests/examples/JDBC/readme.md @@ -10,4 +10,4 @@ | 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces | -more detail: https://www.taosdata.com/cn//documentation20/connector-java/ \ No newline at end of file +more detail: https://www.taosdata.com/cn/documentation20/connector/java diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c1b75b6e3f928bb9d5ef9231402f03c5a73d274c..101508684c61da848333e7043b21c8d4ec8ede45 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -27,7 +27,7 @@ python3 ./test.py -f insert/bug3654.py python3 ./test.py -f insert/insertDynamicColBeforeVal.py python3 ./test.py -f insert/in_function.py python3 ./test.py -f insert/modify_column.py -python3 ./test.py -f insert/line_insert.py +#python3 ./test.py -f insert/line_insert.py python3 ./test.py -f insert/specialSql.py # timezone @@ -416,9 +416,9 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py python3 ./test.py -f query/queryRegex.py python3 ./test.py -f tools/taosdemoTestdatatype.py -python3 ./test.py -f insert/schemalessInsert.py -python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py -python3 ./test.py -f insert/openTsdbJsonInsert.py +#python3 ./test.py -f insert/schemalessInsert.py +#python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py +#python3 ./test.py -f insert/openTsdbJsonInsert.py #======================p4-end=============== diff --git a/tests/pytest/insert/special_character_show.py b/tests/pytest/insert/special_character_show.py index 3b2df5c87380c22fb18cbee06c866249b4365a70..ce9f1de76aa5896beb3aa78dce8a3a65a81a973c 100644 --- a/tests/pytest/insert/special_character_show.py +++ b/tests/pytest/insert/special_character_show.py @@ -31,9 +31,8 @@ class TDTestCase: tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))') tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))') - tdLog.info('=============== step2,create table增加了转义字符') + tdLog.info('=============== step2,create table with escape character') tdLog.info('create table tb1 using stb1 tags("abc\\"def")') - #增加了转义字符\ tdSql.execute('create table tb1 using stb1 tags("abc\\"def")') tdLog.info('=============== step3,insert data') diff --git a/tests/test/c/createNormalTable.c b/tests/test/c/createNormalTable.c index 60253e2add1ebaa1e6c2c00b073cf13672789346..0dad7eb9b68a5584f4f6347c74b8266299c03da4 100644 --- a/tests/test/c/createNormalTable.c +++ b/tests/test/c/createNormalTable.c @@ -233,5 +233,5 @@ void shellParseArgument(int argc, char *argv[]) { pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); pPrint("%s replica:%d %s", GREEN, replica, NC); - pPrint("%s start create table performace test 
%s", GREEN, NC); + pPrint("%s start create table performance test %s", GREEN, NC); } diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c index b94c687f2cba7310949b0a3b12b6f4fc007e5a9a..0e81279819ec8c1c1c0e5601a24193823997c914 100644 --- a/tests/test/c/createTablePerformance.c +++ b/tests/test/c/createTablePerformance.c @@ -221,5 +221,5 @@ void shellParseArgument(int argc, char *argv[]) { pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); pPrint("%s replica:%d %s", GREEN, replica, NC); - pPrint("%s start create table performace test %s", GREEN, NC); + pPrint("%s start create table performance test %s", GREEN, NC); }