Commit d7b14860 authored by shenglian zhou

Merge branch 'develop' into szhou/feature/support-math-functions

...@@ -19,3 +19,6 @@ ...@@ -19,3 +19,6 @@
[submodule "src/plugins/blm3"] [submodule "src/plugins/blm3"]
path = src/plugins/blm3 path = src/plugins/blm3
url = https://github.com/taosdata/blm3 url = https://github.com/taosdata/blm3
[submodule "deps/avro"]
path = deps/avro
url = https://github.com/apache/avro
...@@ -15,6 +15,26 @@ ELSE () ...@@ -15,6 +15,26 @@ ELSE ()
CMAKE_MINIMUM_REQUIRED(VERSION 3.0) CMAKE_MINIMUM_REQUIRED(VERSION 3.0)
ENDIF () ENDIF ()
if(NOT WIN32)
string(ASCII 27 Esc)
set(ColourReset "${Esc}[m")
set(ColourBold "${Esc}[1m")
set(Red "${Esc}[31m")
set(Green "${Esc}[32m")
set(Yellow "${Esc}[33m")
set(Blue "${Esc}[34m")
set(Magenta "${Esc}[35m")
set(Cyan "${Esc}[36m")
set(White "${Esc}[37m")
set(BoldRed "${Esc}[1;31m")
set(BoldGreen "${Esc}[1;32m")
set(BoldYellow "${Esc}[1;33m")
set(BoldBlue "${Esc}[1;34m")
set(BoldMagenta "${Esc}[1;35m")
set(BoldCyan "${Esc}[1;36m")
set(BoldWhite "${Esc}[1;37m")
endif()
SET(TD_ACCOUNT FALSE) SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE) SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE) SET(TD_GRANT FALSE)
......
...@@ -107,7 +107,77 @@ def pre_test(){ ...@@ -107,7 +107,77 @@ def pre_test(){
make > /dev/null make > /dev/null
make install > /dev/null make install > /dev/null
cd ${WKC}/tests cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/ || echo "not install" pip3 install ${WKC}/src/connector/python/
'''
return 1
}
def pre_test_noinstall(){
sh'hostname'
sh'''
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
git checkout 2.0
'''
}
else{
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WK}
git checkout 2.0
'''
}
else{
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
git clean -dfx
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
''' '''
return 1 return 1
} }
...@@ -460,31 +530,55 @@ pipeline { ...@@ -460,31 +530,55 @@ pipeline {
stage('arm64centos7') { stage('arm64centos7') {
agent{label " arm64centos7 "} agent{label " arm64centos7 "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64centos8') { stage('arm64centos8') {
agent{label " arm64centos8 "} agent{label " arm64centos8 "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm32bionic') { stage('arm32bionic') {
agent{label " arm32bionic "} agent{label " arm32bionic "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64bionic') { stage('arm64bionic') {
agent{label " arm64bionic "} agent{label " arm64bionic "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64focal') { stage('arm64focal') {
agent{label " arm64focal "} agent{label " arm64focal "}
steps { steps {
pre_test() pre_test_noinstall()
}
}
stage('centos7') {
agent{label " centos7 "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:trusty') {
agent{label " trusty "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:xenial') {
agent{label " xenial "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:bionic') {
agent{label " bionic "}
steps {
pre_test_noinstall()
} }
} }
......
...@@ -128,7 +128,6 @@ IF (TD_APLHINE) ...@@ -128,7 +128,6 @@ IF (TD_APLHINE)
MESSAGE(STATUS "aplhine is defined") MESSAGE(STATUS "aplhine is defined")
ENDIF () ENDIF ()
MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP})
IF ("${BUILD_HTTP}" STREQUAL "") IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX) IF (TD_LINUX)
IF (TD_ARM_32) IF (TD_ARM_32)
...@@ -140,7 +139,6 @@ IF ("${BUILD_HTTP}" STREQUAL "") ...@@ -140,7 +139,6 @@ IF ("${BUILD_HTTP}" STREQUAL "")
SET(BUILD_HTTP "true") SET(BUILD_HTTP "true")
ENDIF () ENDIF ()
ENDIF () ENDIF ()
MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP})
IF (${BUILD_HTTP} MATCHES "true") IF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE) SET(TD_BUILD_HTTP TRUE)
...@@ -150,6 +148,14 @@ IF (TD_BUILD_HTTP) ...@@ -150,6 +148,14 @@ IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED) ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF () ENDIF ()
IF ("${AVRO_SUPPORT}" MATCHES "true")
SET(TD_AVRO_SUPPORT TRUE)
ENDIF ()
IF (TD_AVRO_SUPPORT)
ADD_DEFINITIONS(-DAVRO_SUPPORT)
ENDIF ()
IF (TD_LINUX) IF (TD_LINUX)
ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_LINUX) ADD_DEFINITIONS(-D_LINUX)
......
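The new `TD_AVRO_SUPPORT` option above translates into a `-DAVRO_SUPPORT` compile definition, so C sources can gate Avro-specific code paths at build time. A minimal sketch of such a guard (the function name is illustrative and not taken from the TDengine sources):

```c
#ifdef AVRO_SUPPORT
#include <avro.h>   /* available once the deps/avro submodule is built */
#endif

/* illustrative helper: reports whether this binary was built with Avro support */
int isAvroSupported(void) {
#ifdef AVRO_SUPPORT
    return 1;   /* compiled with -DAVRO_SUPPORT (AVRO_SUPPORT=true at configure time) */
#else
    return 0;
#endif
}
```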
...@@ -92,6 +92,8 @@ ENDIF () ...@@ -92,6 +92,8 @@ ENDIF ()
SET(TD_BUILD_HTTP FALSE) SET(TD_BUILD_HTTP FALSE)
SET(TD_AVRO_SUPPORT FALSE)
SET(TD_MEMORY_SANITIZER FALSE) SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true") IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE) SET(TD_MEMORY_SANITIZER TRUE)
......
...@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT) ...@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C) ADD_SUBDIRECTORY(MQTT-C)
ENDIF () ENDIF ()
IF (AVRO_SUPPORT)
MESSAGE("")
MESSAGE("${Green} ENABLE avro format support ${ColourReset}")
MESSAGE("")
include(ExternalProject)
ExternalProject_Add(
apache-avro
PREFIX "avro"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c
BUILD_IN_SOURCE 1
PATCH_COMMAND
COMMAND git clean -f -d
COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build
)
ELSE ()
MESSAGE("")
MESSAGE("${Yellow} NO avro format support ${ColourReset}")
MESSAGE("")
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED) IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("")
MESSAGE("${Green} ENABLE jemalloc ${ColourReset}")
MESSAGE("")
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc ExternalProject_Add(jemalloc
PREFIX "jemalloc" PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
......
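For reference, the `ExternalProject_Add(apache-avro ...)` step above installs the Avro C library under `${CMAKE_BINARY_DIR}/build`. A hedged sketch of how a consumer target could pick it up, assuming a target named `taosdump` and static linking (both are illustrative here, not dictated by this diff):

```cmake
IF (TD_AVRO_SUPPORT)
  # ensure the external project is built before the consumer target
  ADD_DEPENDENCIES(taosdump apache-avro)
  # headers and the static library land under the install prefix used above
  TARGET_INCLUDE_DIRECTORIES(taosdump PRIVATE ${CMAKE_BINARY_DIR}/build/include)
  TARGET_LINK_LIBRARIES(taosdump ${CMAKE_BINARY_DIR}/build/lib/libavro.a)
ENDIF ()
```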
Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
## <a class="anchor" id="grafana"></a>Grafana ## <a class="anchor" id="grafana"></a>Grafana
TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。 TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。关于TDengine插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。
### 安装Grafana ### 安装Grafana
...@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ...@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
### 配置Grafana ### 配置Grafana
TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplugin 目录下。 TDengine 的 Grafana 插件请从 <https://github.com/taosdata/grafanaplugin/releases/latest> 下载。
以 CentOS 7.2 操作系统为例,将 grafanaplugin 目录拷贝到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
```bash ```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
``` ```
Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件: 以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
``` ```
Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
```ini
[plugins] [plugins]
enable_alpha = true allow_loading_unsigned_plugins = tdengine-datasource
allow_loading_unsigned_plugins = taosdata-tdengine-datasource
``` ```
### 使用 Grafana ### 使用 Grafana
...@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource ...@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
* ALIAS BY:可设置当前查询别名。 * ALIAS BY:可设置当前查询别名。
* GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 * GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
![img](../images/connections/create_dashboard2.jpg) ![img](../images/connections/create_dashboard2.jpg)
...@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource ...@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
#### 导入 Dashboard #### 导入 Dashboard
在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard 我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)
点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件 点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,将 id `15146` 填入并加载
![img](../images/connections/import_dashboard1.jpg) ![img](../images/connections/import_dashboard1.jpg)
导入完成之后可看到如下效果: 导入完成之后可看到如下效果:
![img](../images/connections/import_dashboard2.jpg) ![img](../images/connections/dashboard-15146.png)
## <a class="anchor" id="matlab"></a>MATLAB ## <a class="anchor" id="matlab"></a>MATLAB
......
...@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ...@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置 ## 数据链路设置
### 复制 TDengine 插件到 grafana 插件目录 ### 下载 TDengine 插件到 grafana 插件目录
```
1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine ```bash
2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
4. sudo systemctl restart grafana-server.service 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
``` ```
### 修改 /etc/telegraf/telegraf.conf ### 修改 /etc/telegraf/telegraf.conf
...@@ -61,7 +63,7 @@ sudo systemctl start telegraf ...@@ -61,7 +63,7 @@ sudo systemctl start telegraf
使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。 使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。
点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。 点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件再导入。之后可以看到如下界面的仪表盘: 点击左侧加号图标并选择 Import,从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png) ![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)
......
...@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ...@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置 ## 数据链路设置
### 复制 TDengine 插件到 grafana 插件目录 ### 复制 TDengine 插件到 grafana 插件目录
```
1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine ```bash
2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
4. sudo systemctl restart grafana-server.service 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
``` ```
### 配置 collectd ### 配置 collectd
...@@ -62,13 +64,13 @@ repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for S ...@@ -62,13 +64,13 @@ repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for S
#### 导入 collectd 仪表盘 #### 导入 collectd 仪表盘
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 下载 dashboard json 文件再导入。之后可以看到如下界面的仪表盘: 从 https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 下载 dashboard json 文件,点击左侧加号图标并选择 Import,按照界面提示选择 JSON 文件导入。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png) ![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png)
#### 导入 StatsD 仪表盘 #### 导入 StatsD 仪表盘
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 文件。如果安装 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 下载 dashboard json 文件再导入。之后可以看到如下界面的仪表盘: 从 https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 下载 dashboard json 文件,点击左侧加号图标并选择 Import,按照界面提示导入JSON文件。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png) ![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png)
## 总结 ## 总结
......
...@@ -12,12 +12,17 @@ https://grafana.com/grafana/download. ...@@ -12,12 +12,17 @@ https://grafana.com/grafana/download.
### Configure Grafana ### Configure Grafana
TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory. Download the Grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.
```bash
GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana. Taking CentOS 7.2 as an example, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
```bash ```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
``` ```
### Use Grafana ### Use Grafana
...@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp ...@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard #### Import Dashboard
A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`. We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file: Click the `Import` button on the left panel, choose **Grafana.com Dashboard**, and load dashboard id `15146`:
![img](../images/connections/import_dashboard1.jpg) ![img](../images/connections/import_dashboard1.jpg)
You can see as follows after Dashboard imported. You can see as follows after Dashboard imported.
![img](../images/connections/import_dashboard2.jpg) ![img](../images/connections/dashboard-15146.png)
## <a class="anchor" id="matlab"></a> MATLAB ## <a class="anchor" id="matlab"></a> MATLAB
......
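Since the zip-packaged plugin is unsigned, the English instructions presumably need the same grafana.ini change as the Chinese document earlier in this commit; a sketch of those lines (mirroring the Chinese section, not shown in this hunk):

```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```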
...@@ -203,6 +203,9 @@ keepColumnName 1 ...@@ -203,6 +203,9 @@ keepColumnName 1
# database name must be specified in restful interface if the following parameter is set, off by default # database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1 # httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files. # The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters # max number of lines per log filters
# numOfLogLines 10000000 # numOfLogLines 10000000
......
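The new `httpKeepAlive` entry controls how long the RESTful server keeps a connection context cached, in milliseconds. To override the 30-second default you would uncomment and adjust it in taos.cfg; a hedged example (the value must stay within the 3000–3600000 range enforced in the tglobal.c hunk later in this commit):

```
# keep RESTful connection contexts for 60 seconds instead of the default 30
httpKeepAlive 60000
```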
...@@ -128,12 +128,12 @@ function check_link() { ...@@ -128,12 +128,12 @@ function check_link() {
function check_main_path() { function check_main_path() {
#check install main dir and all sub dir #check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d") main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in ${main_dir[@]};do for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i check_file ${install_main_dir} $i
done done
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs") nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in ${nginx_main_dir[@]};do for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i check_file ${nginx_dir} $i
done done
fi fi
...@@ -143,11 +143,11 @@ function check_main_path() { ...@@ -143,11 +143,11 @@ function check_main_path() {
function check_bin_path() { function check_bin_path() {
# check install bin dir and all sub dir # check install bin dir and all sub dir
bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
for i in ${bin_dir[@]};do for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i check_file ${sbin_dir} $i
done done
lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
for i in ${lbin_dir[@]};do for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i check_link ${bin_link_dir}/$i
done done
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
...@@ -171,7 +171,7 @@ function check_lib_path() { ...@@ -171,7 +171,7 @@ function check_lib_path() {
function check_header_path() { function check_header_path() {
# check all header # check all header
header_dir=("taos.h" "taoserror.h") header_dir=("taos.h" "taoserror.h")
for i in ${header_dir[@]};do for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i check_link ${inc_link_dir}/$i
done done
echo -e "Check bin path:\033[32mOK\033[0m!" echo -e "Check bin path:\033[32mOK\033[0m!"
......
...@@ -68,12 +68,6 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat ...@@ -68,12 +68,6 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
else
echo "grafanaplugin bundled directory not found!"
exit 1
fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
......
...@@ -151,7 +151,7 @@ function vercomp () { ...@@ -151,7 +151,7 @@ function vercomp () {
} }
# 1. check version information # 1. check version information
if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then
echo "please enter correct version" echo "please enter correct version"
exit 0 exit 0
fi fi
......
...@@ -36,7 +36,7 @@ local cur_dir ...@@ -36,7 +36,7 @@ local cur_dir
cd $1 cd $1
cur_dir=$(pwd) cur_dir=$(pwd)
for dirlist in $(ls ${cur_dir}); do for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then if test -d ${dirlist}; then
cd ${dirlist} cd ${dirlist}
cp_rpm_package ${cur_dir}/${dirlist} cp_rpm_package ${cur_dir}/${dirlist}
......
...@@ -73,12 +73,6 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin ...@@ -73,12 +73,6 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
else
echo grafanaplugin bundled directory not found!
exit 1
fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
......
# /bin/bash #!/bin/bash
# #
CSI=$(echo -e "\033[") CSI=$(echo -e "\033[")
CRED="${CSI}1;31m" CRED="${CSI}1;31m"
......
...@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() { ...@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -358,7 +358,7 @@ function is_correct_ipaddr() { ...@@ -358,7 +358,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { ...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -342,7 +342,7 @@ function is_correct_ipaddr() { ...@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() { ...@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -330,7 +330,7 @@ function is_correct_ipaddr() { ...@@ -330,7 +330,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { ...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -342,7 +342,7 @@ function is_correct_ipaddr() { ...@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -212,7 +212,8 @@ function install_jemalloc() { ...@@ -212,7 +212,8 @@ function install_jemalloc() {
fi fi
if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc /usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
/usr/local/include/jemalloc
fi fi
if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then
/usr/bin/install -c -d /usr/local/lib /usr/bin/install -c -d /usr/local/lib
...@@ -225,25 +226,49 @@ function install_jemalloc() { ...@@ -225,25 +226,49 @@ function install_jemalloc() {
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig /usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
/usr/local/lib/pkgconfig
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi fi
fi fi
if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc /usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
/usr/local/share/doc/jemalloc
fi fi
if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then
/usr/bin/install -c -d /usr/local/share/man/man3 /usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3 /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
/usr/local/share/man/man3
fi fi
fi
}
function install_avro() {
if [ "$osType" != "Darwin" ]; then
if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
/usr/bin/install -c -d /usr/local/$1
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
ln -sf libavro.so.23 /usr/local/$1/libavro.so
/usr/bin/install -c -d /usr/local/$1
[ -f ${binary_dir}/build/$1/libavro.a ] &&
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
if [ -d /etc/ld.so.conf.d ]; then if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
${csudo} ldconfig ${csudo} ldconfig
else else
echo "/etc/ld.so.conf.d not found!" echo "/etc/ld.so.conf.d not found!"
fi fi
fi fi
fi
} }
function install_lib() { function install_lib() {
...@@ -292,6 +317,8 @@ function install_lib() { ...@@ -292,6 +317,8 @@ function install_lib() {
fi fi
install_jemalloc install_jemalloc
install_avro lib
install_avro lib64
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig ${csudo} ldconfig
...@@ -381,11 +408,6 @@ function install_data() { ...@@ -381,11 +408,6 @@ function install_data() {
} }
function install_connector() { function install_connector() {
if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else else
......
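After `make install` runs the new `install_avro lib` / `install_avro lib64` steps, the shared library should be resolvable by the dynamic linker. A quick, hedged sanity check (paths assume the /usr/local/lib and /usr/local/lib64 prefixes used above):

```bash
# verify the Avro runtime was installed and registered with ldconfig
ldconfig -p | grep libavro
ls -l /usr/local/lib/libavro.so* /usr/local/lib64/libavro.so* 2>/dev/null
```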
...@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector" ...@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector ...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo ...@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then #if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: # cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
# else
# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
# fi
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then # if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector # cp -r ${connector_dir}/go ${install_dir}/connector
# else # else
......
...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector ...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -127,7 +127,7 @@ function add_newHostname_to_hosts() { ...@@ -127,7 +127,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -182,7 +182,7 @@ function is_correct_ipaddr() { ...@@ -182,7 +182,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -2501,6 +2501,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -2501,6 +2501,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg12 = "parameter is out of range [1, 100]"; const char* msg12 = "parameter is out of range [1, 100]";
const char* msg13 = "parameter list required"; const char* msg13 = "parameter list required";
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
const char* msg15 = "parameter is out of range [1, 1000]";
switch (functionId) { switch (functionId) {
case TSDB_FUNC_COUNT: { case TSDB_FUNC_COUNT: {
...@@ -2948,11 +2949,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -2948,11 +2949,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} }
} }
} else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t numRowsSelected = GET_INT32_VAL(val); int64_t numRowsSelected = GET_INT64_VAL(val);
if (numRowsSelected <= 0 || numRowsSelected > 1000) { if (numRowsSelected <= 0 || numRowsSelected > 1000) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
} }
// todo REFACTOR // todo REFACTOR
......
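The parser change above requires the second argument of `MAVG` and `SAMPLE` to be an integer constant and checks it against the range [1, 1000], reporting the new `msg15` otherwise. A hedged SQL illustration (table and column names are hypothetical):

```sql
SELECT MAVG(current, 10) FROM meters;
SELECT SAMPLE(current, 1000) FROM meters;
SELECT MAVG(current, 1001) FROM meters;
```

The first two statements are accepted; the third is rejected with "parameter is out of range [1, 1000]", and a non-integer second argument is rejected before the range check.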
...@@ -64,10 +64,11 @@ extern int32_t tsCompressColData; ...@@ -64,10 +64,11 @@ extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults; extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[]; extern char tsTempDir[];
//query buffer management // query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing extern int64_t
extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked
extern int8_t tsKeepOriginalColumnName; extern int8_t tsKeepOriginalColumnName;
...@@ -108,7 +109,7 @@ extern int32_t tsQuorum; ...@@ -108,7 +109,7 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate; extern int8_t tsUpdate;
extern int8_t tsCacheLastRow; extern int8_t tsCacheLastRow;
//tsdb // tsdb
extern bool tsdbForceKeepFile; extern bool tsdbForceKeepFile;
extern bool tsdbForceCompactFile; extern bool tsdbForceCompactFile;
extern int32_t tsdbWalFlushSize; extern int32_t tsdbWalFlushSize;
...@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress; ...@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql; extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum; extern int8_t tsTelegrafUseFieldNum;
extern int8_t tsHttpDbNameMandatory; extern int8_t tsHttpDbNameMandatory;
extern int32_t tsHttpKeepAlive;
// mqtt // mqtt
extern int8_t tsEnableMqttModule; extern int8_t tsEnableMqttModule;
......
...@@ -14,18 +14,18 @@ ...@@ -14,18 +14,18 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "tglobal.h"
#include "monitor.h"
#include "os.h" #include "os.h"
#include "taosdef.h" #include "taosdef.h"
#include "taoserror.h" #include "taoserror.h"
#include "tulog.h" #include "tcompare.h"
#include "tconfig.h" #include "tconfig.h"
#include "tglobal.h"
#include "monitor.h"
#include "tsocket.h"
#include "tutil.h"
#include "tlocale.h" #include "tlocale.h"
#include "tsocket.h"
#include "ttimezone.h" #include "ttimezone.h"
#include "tcompare.h" #include "tulog.h"
#include "tutil.h"
// cluster // cluster
char tsFirst[TSDB_EP_LEN] = {0}; char tsFirst[TSDB_EP_LEN] = {0};
...@@ -51,7 +51,7 @@ int64_t tsDnodeStartTime = 0; ...@@ -51,7 +51,7 @@ int64_t tsDnodeStartTime = 0;
// common // common
int32_t tsRpcTimer = 300; int32_t tsRpcTimer = 300;
int32_t tsRpcMaxTime = 600; // seconds; int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default
int32_t tsMaxShellConns = 50000; int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000; int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second int32_t tsShellActivityTimer = 3; // second
...@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000; ...@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000;
int32_t tsStreamCompStartDelay = 10000; int32_t tsStreamCompStartDelay = 10000;
// the stream computing delay time after executing failed, change accordingly // the stream computing delay time after executing failed, change accordingly
int32_t tsRetryStreamCompDelay = 10*1000; int32_t tsRetryStreamCompDelay = 10 * 1000;
// The delayed computing ration. 10% of the whole computing time window by default. // The delayed computing ration. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f; float tsStreamComputDelayRatio = 0.1f;
...@@ -180,6 +180,7 @@ int8_t tsHttpEnableCompress = 1; ...@@ -180,6 +180,7 @@ int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0; int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0; int8_t tsTelegrafUseFieldNum = 0;
int8_t tsHttpDbNameMandatory = 0; int8_t tsHttpDbNameMandatory = 0;
int32_t tsHttpKeepAlive = 30000;
// mqtt // mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
...@@ -274,7 +275,8 @@ int8_t tsClientMerge = 0; ...@@ -274,7 +275,8 @@ int8_t tsClientMerge = 0;
// //
// lossy compress 6 // lossy compress 6
// //
char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress. char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
// can close lossy compress.
// below option can take effect when tsLossyColumns not empty // below option can take effect when tsLossyColumns not empty
double fPrecision = 1E-8; // float column precision double fPrecision = 1E-8; // float column precision
double dPrecision = 1E-16; // double column precision double dPrecision = 1E-16; // double column precision
...@@ -326,7 +328,8 @@ bool taosCfgDynamicOptions(char *msg) { ...@@ -326,7 +328,8 @@ bool taosCfgDynamicOptions(char *msg) {
int32_t vint = 0; int32_t vint = 0;
paGetToken(msg, &option, &olen); paGetToken(msg, &option, &olen);
if (olen == 0) return false;; if (olen == 0) return false;
;
paGetToken(option + olen + 1, &value, &vlen); paGetToken(option + olen + 1, &value, &vlen);
if (vlen == 0) if (vlen == 0)
...@@ -339,7 +342,7 @@ bool taosCfgDynamicOptions(char *msg) { ...@@ -339,7 +342,7 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i; SGlobalCfg *cfg = tsGlobalConfig + i;
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;
int32_t cfgLen = (int32_t)strlen(cfg->option); int32_t cfgLen = (int32_t)strlen(cfg->option);
...@@ -427,7 +430,7 @@ static void taosCheckDataDirCfg() { ...@@ -427,7 +430,7 @@ static void taosCheckDataDirCfg() {
} }
static int32_t taosCheckTmpDir(void) { static int32_t taosCheckTmpDir(void) {
if (strlen(tsTempDir) <= 0){ if (strlen(tsTempDir) <= 0) {
uError("tempDir is not set"); uError("tempDir is not set");
return -1; return -1;
} }
...@@ -577,8 +580,8 @@ static void doInitGlobalConfig(void) { ...@@ -577,8 +580,8 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsMaxNumOfDistinctResults; cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10*10000; cfg.minValue = 10 * 10000;
cfg.maxValue = 10000*10000; cfg.maxValue = 10000 * 10000;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
...@@ -1320,6 +1323,17 @@ static void doInitGlobalConfig(void) { ...@@ -1320,6 +1323,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// pContext in cache
cfg.option = "httpKeepAlive";
cfg.ptr = &tsHttpKeepAlive;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 3000;
cfg.maxValue = 3600000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// debug flag // debug flag
cfg.option = "numOfLogLines"; cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines; cfg.ptr = &tsNumOfLogLines;
...@@ -1401,7 +1415,6 @@ static void doInitGlobalConfig(void) { ...@@ -1401,7 +1415,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "sdbDebugFlag"; cfg.option = "sdbDebugFlag";
cfg.ptr = &sdbDebugFlag; cfg.ptr = &sdbDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
...@@ -1731,12 +1744,9 @@ static void doInitGlobalConfig(void) { ...@@ -1731,12 +1744,9 @@ static void doInitGlobalConfig(void) {
#else #else
assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#endif #endif
} }
void taosInitGlobalCfg() { void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); }
pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
}
int32_t taosCheckGlobalCfg() { int32_t taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN]; char fqdn[TSDB_FQDN_LEN];
...@@ -1794,8 +1804,8 @@ int32_t taosCheckGlobalCfg() { ...@@ -1794,8 +1804,8 @@ int32_t taosCheckGlobalCfg() {
} }
if (tsMaxTablePerVnode < tsMinTablePerVnode) { if (tsMaxTablePerVnode < tsMinTablePerVnode) {
uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode,
tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode); tsMinTablePerVnode, tsMinTablePerVnode);
tsMaxTablePerVnode = tsMinTablePerVnode; tsMaxTablePerVnode = tsMinTablePerVnode;
} }
...@@ -1839,7 +1849,7 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { ...@@ -1839,7 +1849,7 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
char *temp = strchr(fqdn, ':'); char *temp = strchr(fqdn, ':');
if (temp) { if (temp) {
*temp = 0; *temp = 0;
*port = atoi(temp+1); *port = atoi(temp + 1);
} }
if (*port == 0) { if (*port == 0) {
......
...@@ -36,15 +36,15 @@ import java.util.regex.Pattern; ...@@ -36,15 +36,15 @@ import java.util.regex.Pattern;
* compatibility needs. * compatibility needs.
*/ */
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
// for jdbc preparedStatement interface
private String rawSql; private String rawSql;
private Object[] parameters; private Object[] parameters;
// for parameter binding
private ArrayList<ColumnInfo> colData; private long nativeStmtHandle = 0;
private String tableName;
private ArrayList<TableTagInfo> tableTags; private ArrayList<TableTagInfo> tableTags;
private int tagValueLength; private int tagValueLength;
private ArrayList<ColumnInfo> colData;
private String tableName;
private long nativeStmtHandle = 0;
TSDBPreparedStatement(TSDBConnection connection, String sql) { TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection); super(connection);
...@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
preprocessSql(); preprocessSql();
} }
/*
*
*/
/** /**
* Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
...@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override @Override
public void setObject(int parameterIndex, Object x) throws SQLException { public void setObject(int parameterIndex, Object x) throws SQLException {
if (isClosed()) { if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
} if (parameterIndex < 1 && parameterIndex >= parameters.length)
if (parameterIndex < 1 && parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
}
parameters[parameterIndex - 1] = x; parameters[parameterIndex - 1] = x;
} }
...@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
// TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
...@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
//TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
...@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
@Override @Override
...@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
/////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////
// NOTE: the following APIs are not JDBC compatible // NOTE: the following APIs are not JDBC compatible
// set the bind table name // parameter binding
private static class ColumnInfo { private static class ColumnInfo {
@SuppressWarnings("rawtypes") @SuppressWarnings("rawtypes")
private ArrayList data; private ArrayList data;
...@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
} }
} }
public void setTableName(String name) { public void setTableName(String name) throws SQLException {
if (this.tableName != null) {
this.columnDataExecuteBatch();
this.columnDataClearBatchInternal();
}
this.tableName = name; this.tableName = name;
} }
...@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void columnDataExecuteBatch() throws SQLException { public void columnDataExecuteBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.executeBatch(this.nativeStmtHandle); connector.executeBatch(this.nativeStmtHandle);
this.columnDataClearBatch(); this.columnDataClearBatchInternal();
} }
@Deprecated
public void columnDataClearBatch() { public void columnDataClearBatch() {
columnDataClearBatchInternal();
}
private void columnDataClearBatchInternal() {
int size = this.colData.size(); int size = this.colData.size();
this.colData.clear(); this.colData.clear();
this.colData.addAll(Collections.nCopies(size, null)); this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name this.tableName = null; // clear the table name
} }
public void columnDataCloseBatch() throws SQLException { public void columnDataCloseBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.closeBatch(this.nativeStmtHandle); connector.closeBatch(this.nativeStmtHandle);
...@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
this.nativeStmtHandle = 0L; this.nativeStmtHandle = 0L;
this.tableName = null; this.tableName = null;
} }
@Override
public void close() throws SQLException {
this.columnDataClearBatchInternal();
this.columnDataCloseBatch();
super.close();
}
} }
...@@ -5,9 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers; ...@@ -5,9 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement; import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator; import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity; import org.apache.http.HttpEntity;
import org.apache.http.NoHttpResponseException;
import org.apache.http.client.ClientProtocolException; import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig; import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*; import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.protocol.HttpClientContext;
...@@ -21,10 +19,7 @@ import org.apache.http.protocol.HTTP; ...@@ -21,10 +19,7 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import javax.net.ssl.SSLException;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.sql.SQLException; import java.sql.SQLException;
...@@ -53,10 +48,9 @@ public class HttpClientPoolUtil { ...@@ -53,10 +48,9 @@ public class HttpClientPoolUtil {
return DEFAULT_HTTP_KEEP_TIME * 1000; return DEFAULT_HTTP_KEEP_TIME * 1000;
}; };
private static CloseableHttpClient httpClient; private static final CloseableHttpClient httpClient;
static { static {
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
......
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
public class ParameterBindTest {
private static final String host = "127.0.0.1";
private static final String stable = "weather";
private Connection conn;
private final Random random = new Random(System.currentTimeMillis());
@Test
public void test() {
// given
String[] tbnames = {"t1", "t2", "t3"};
int rows = 10;
// when
insertIntoTables(tbnames, rows);
// then
assertRows(stable, tbnames.length * rows);
for (String t : tbnames) {
assertRows(t, rows);
}
}
@Test
public void testMultiThreads() {
// given
String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}};
int rows = 10;
// when
List<Thread> threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList());
threads.forEach(Thread::start);
for (Thread thread : threads) {
try {
thread.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// then
for (String[] table : tables) {
for (String t : table) {
assertRows(t, rows);
}
}
}
private void assertRows(String tbname, int rows) {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select count(*) from " + tbname);
while (rs.next()) {
int count = rs.getInt(1);
Assert.assertEquals(rows, count);
}
} catch (SQLException e) {
e.printStackTrace();
}
}
private void insertIntoTables(String[] tbnames, int rowsEachTable) {
long current = System.currentTimeMillis();
String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 0; i < tbnames.length; i++) {
pstmt.setTableName(tbnames[i]);
pstmt.setTagInt(0, random.nextInt(100));
pstmt.setTagInt(1, random.nextInt(100));
ArrayList<Long> timestampList = new ArrayList<>();
for (int j = 0; j < rowsEachTable; j++) {
timestampList.add(current + i * 1000 + j);
}
pstmt.setTimestamp(0, timestampList);
ArrayList<Integer> f1List = new ArrayList<>();
for (int j = 0; j < rowsEachTable; j++) {
f1List.add(random.nextInt(100));
}
pstmt.setInt(1, f1List);
ArrayList<Integer> f2List = new ArrayList<>();
for (int j = 0; j < rowsEachTable; j++) {
f2List.add(random.nextInt(100));
}
pstmt.setInt(2, f2List);
pstmt.columnDataAddBatch();
}
pstmt.columnDataExecuteBatch();
} catch (SQLException e) {
e.printStackTrace();
}
}
@Before
public void before() {
String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
try {
conn = DriverManager.getConnection(url);
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test_pd");
stmt.execute("create database if not exists test_pd");
stmt.execute("use test_pd");
stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)");
} catch (SQLException e) {
e.printStackTrace();
}
}
@After
public void after() {
try {
// Statement stmt = conn.createStatement();
// stmt.execute("drop database if exists test_pd");
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
...@@ -2,7 +2,6 @@ package com.taosdata.jdbc.utils; ...@@ -2,7 +2,6 @@ package com.taosdata.jdbc.utils;
import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject; import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBError;
import org.junit.Test; import org.junit.Test;
...@@ -11,7 +10,6 @@ import java.net.URLEncoder; ...@@ -11,7 +10,6 @@ import java.net.URLEncoder;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.List; import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
...@@ -27,11 +25,6 @@ public class HttpClientPoolUtilTest { ...@@ -27,11 +25,6 @@ public class HttpClientPoolUtilTest {
// given // given
List<Thread> threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> { List<Thread> threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> {
useDB(); useDB();
// try {
// TimeUnit.SECONDS.sleep(10);
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
})).collect(Collectors.toList()); })).collect(Collectors.toList());
threads.forEach(Thread::start); threads.forEach(Thread::start);
...@@ -43,7 +36,6 @@ public class HttpClientPoolUtilTest { ...@@ -43,7 +36,6 @@ public class HttpClientPoolUtilTest {
e.printStackTrace(); e.printStackTrace();
} }
} }
} }
private void useDB() { private void useDB() {
......
...@@ -3,6 +3,7 @@ PROJECT(TDengine) ...@@ -3,6 +3,7 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
...@@ -61,12 +62,22 @@ ENDIF () ...@@ -61,12 +62,22 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
IF (TD_LINUX) IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC}) ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdump taos_static cJson) IF (AVRO_SUPPORT)
TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos_static)
ENDIF()
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos cJson) IF (AVRO_SUPPORT)
TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos)
ENDIF ()
ENDIF () ENDIF ()
ENDIF () ENDIF ()
...@@ -74,8 +85,8 @@ IF (TD_DARWIN) ...@@ -74,8 +85,8 @@ IF (TD_DARWIN)
# missing <argp.h> for macosx # missing <argp.h> for macosx
# ADD_EXECUTABLE(taosdump ${SRC}) # ADD_EXECUTABLE(taosdump ${SRC})
# IF (TD_SOMODE_STATIC) # IF (TD_SOMODE_STATIC)
# TARGET_LINK_LIBRARIES(taosdump taos_static cJson) # TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
# ELSE () # ELSE ()
# TARGET_LINK_LIBRARIES(taosdump taos cJson) # TARGET_LINK_LIBRARIES(taosdump taos jansson)
# ENDIF () # ENDIF ()
ENDIF () ENDIF ()
...@@ -28,15 +28,24 @@ ...@@ -28,15 +28,24 @@
#include "tsdb.h" #include "tsdb.h"
#include "tutil.h" #include "tutil.h"
#define AVRO_SUPPORT 0
#if AVRO_SUPPORT == 1 static char **g_tsDumpInSqlFiles = NULL;
static char g_tsCharset[63] = {0};
#ifdef AVRO_SUPPORT
#include <avro.h> #include <avro.h>
#endif #include <jansson.h>
static char **g_tsDumpInAvroFiles = NULL;
static void print_json_aux(json_t *element, int indent);
#endif /* AVRO_SUPPORT */
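/* Note: AVRO_SUPPORT is no longer hard-coded here (the old "#define AVRO_SUPPORT 0" is
 * gone); the #ifdef above expects the macro to come from the build system, matching the
 * IF (AVRO_SUPPORT) branches added to the taosdump CMakeLists.txt in this same patch.
 * Illustrative compile flag only (not taken from the patch):
 *   cc -DAVRO_SUPPORT -c taosdump.c
 */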
#define TSDB_SUPPORT_NANOSECOND 1 #define TSDB_SUPPORT_NANOSECOND 1
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
#define MAX_PATH_LEN 4096 // max path length on linux is 4095
#define COMMAND_SIZE 65536 #define COMMAND_SIZE 65536
#define MAX_RECORDS_PER_REQ 32766 #define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql" //#define DEFAULT_DUMP_FILE "taosdump.sql"
...@@ -46,8 +55,6 @@ ...@@ -46,8 +55,6 @@
static int converStringToReadable(char *str, int size, char *buf, int bufsize); static int converStringToReadable(char *str, int size, char *buf, int bufsize);
static int convertNCharToReadable(char *str, int size, char *buf, int bufsize); static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
static void dumpCharset(FILE *fp);
static void loadFileCharset(FILE *fp, char *fcharset);
typedef struct { typedef struct {
short bytes; short bytes;
...@@ -64,7 +71,12 @@ typedef struct { ...@@ -64,7 +71,12 @@ typedef struct {
#define performancePrint(fmt, ...) \ #define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \ do { if (g_args.performance_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define warnPrint(fmt, ...) \
do { fprintf(stderr, "\033[33m"); \
fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
fprintf(stderr, "\033[0m"); } while(0)
#define errorPrint(fmt, ...) \ #define errorPrint(fmt, ...) \
do { fprintf(stderr, "\033[31m"); \ do { fprintf(stderr, "\033[31m"); \
...@@ -208,14 +220,13 @@ typedef struct { ...@@ -208,14 +220,13 @@ typedef struct {
typedef struct { typedef struct {
pthread_t threadID; pthread_t threadID;
int32_t threadIndex; int32_t threadIndex;
int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN]; char stbName[TSDB_TABLE_NAME_LEN];
int precision; int precision;
TAOS *taos; TAOS *taos;
int64_t rowsOfDumpOut; int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut; int64_t count;
int64_t tableFrom; int64_t from;
} threadInfo; } threadInfo;
typedef struct { typedef struct {
...@@ -225,6 +236,44 @@ typedef struct { ...@@ -225,6 +236,44 @@ typedef struct {
int32_t totalDatabasesOfDumpOut; int32_t totalDatabasesOfDumpOut;
} resultStatistics; } resultStatistics;
#ifdef AVRO_SUPPORT
enum enAvro_Codec {
AVRO_CODEC_START = 0,
AVRO_CODEC_NULL = AVRO_CODEC_START,
AVRO_CODEC_DEFLATE,
AVRO_CODEC_SNAPPY,
AVRO_CODEC_LZMA,
AVRO_CODEC_UNKNOWN = 255
};
char *g_avro_codec[] = {
"null",
"deflate",
"snappy",
"lzma",
"unknown"
};
/* avro section begin */
#define RECORD_NAME_LEN 64
#define FIELD_NAME_LEN 64
#define TYPE_NAME_LEN 16
typedef struct FieldStruct_S {
char name[FIELD_NAME_LEN];
char type[TYPE_NAME_LEN];
} FieldStruct;
typedef struct RecordSchema_S {
char name[RECORD_NAME_LEN];
char *fields;
int num_fields;
} RecordSchema;
/* avro section end */
#endif
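/* Hedged sketch (not part of this patch): RecordSchema->fields is a plain char * used as
 * a packed buffer of FieldStruct entries rather than a typed array, so consumers index it
 * by byte offset. A hypothetical helper that makes the arithmetic explicit, mirroring how
 * parse_json_to_recordschema() walks the buffer later in this file: */
static FieldStruct *recordSchemaFieldAt(RecordSchema *recordSchema, int i) {
    /* entries are packed back to back, as allocated with malloc(sizeof(FieldStruct) * n) */
    return (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
}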
static int64_t g_totalDumpOutRows = 0; static int64_t g_totalDumpOutRows = 0;
SDbInfo **g_dbInfos = NULL; SDbInfo **g_dbInfos = NULL;
...@@ -276,14 +325,17 @@ static struct argp_option options[] = { ...@@ -276,14 +325,17 @@ static struct argp_option options[] = {
// dump format options // dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 2}, {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2}, {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
{"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, #ifdef AVRO_SUPPORT
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, #endif
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
{"debug", 'g', 0, 0, "Print debug info.", 8}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10},
{"debug", 'g', 0, 0, "Print debug info.", 15},
{0} {0}
}; };
...@@ -310,7 +362,10 @@ typedef struct arguments { ...@@ -310,7 +362,10 @@ typedef struct arguments {
// dump format option // dump format option
bool schemaonly; bool schemaonly;
bool with_property; bool with_property;
#ifdef AVRO_SUPPORT
bool avro; bool avro;
int avro_codec;
#endif
int64_t start_time; int64_t start_time;
char humanStartTime[HUMAN_TIME_LEN]; char humanStartTime[HUMAN_TIME_LEN];
int64_t end_time; int64_t end_time;
...@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0}; ...@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL; static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1; static int g_numOfCores = 1;
static int dumpOut();
static int dumpIn();
static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
FILE *fp);
static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
FILE *fp, char* dbName);
static int getTableDes(
char* dbName, char *table,
TableDef *stableDes, bool isSuperTable);
static int64_t dumpTableData(FILE *fp, char *tbName,
char* dbName,
int precision,
char *jsonAvroSchema);
static int checkParam();
static void freeDbInfos();
struct arguments g_args = { struct arguments g_args = {
// connection option // connection option
NULL, NULL,
...@@ -381,7 +420,10 @@ struct arguments g_args = { ...@@ -381,7 +420,10 @@ struct arguments g_args = {
// dump format option // dump format option
false, // schemaonly false, // schemaonly
true, // with_property true, // with_property
false, // avro format #ifdef AVRO_SUPPORT
false, // avro
AVRO_CODEC_SNAPPY, // avro_codec
#endif
-INT64_MAX + 1, // start_time -INT64_MAX + 1, // start_time
{0}, // humanStartTime {0}, // humanStartTime
INT64_MAX, // end_time INT64_MAX, // end_time
...@@ -392,7 +434,7 @@ struct arguments g_args = { ...@@ -392,7 +434,7 @@ struct arguments g_args = {
1, // table_batch 1, // table_batch
false, // allow_sys false, // allow_sys
// other options // other options
5, // thread_num 8, // thread_num
0, // abort 0, // abort
NULL, // arg_list NULL, // arg_list
0, // arg_list_len 0, // arg_list_len
...@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
} }
break; break;
#ifdef AVRO_SUPPORT
case 'v':
g_args.avro = true;
break;
case 'd':
for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
if (0 == strcmp(arg, g_avro_codec[i])) {
g_args.avro_codec = i;
break;
}
}
break;
#endif
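/* Usage sketch for the two options handled above (illustrative command line, not from the
 * patch):
 *   taosdump -v -d lzma <database>
 * The loop over g_avro_codec[] stores the matching index (AVRO_CODEC_LZMA here) in
 * g_args.avro_codec; an unrecognised codec name falls through without assigning anything,
 * so the default AVRO_CODEC_SNAPPY stays in effect. */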
case 'r': case 'r':
g_args.resultFile = arg; g_args.resultFile = arg;
break; break;
...@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'N': case 'N':
g_args.with_property = false; g_args.with_property = false;
break; break;
case 'v':
g_args.avro = true;
break;
case 'S': case 'S':
// parse time here. // parse time here.
break; break;
...@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.abort = 1; g_args.abort = 1;
break; break;
case ARGP_KEY_ARG: case ARGP_KEY_ARG:
if (strlen(state->argv[state->next - 1])) {
g_args.arg_list = &state->argv[state->next - 1]; g_args.arg_list = &state->argv[state->next - 1];
g_args.arg_list_len = state->argc - state->next + 1; g_args.arg_list_len = state->argc - state->next + 1;
}
state->next = state->argc; state->next = state->argc;
break; break;
...@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause( ...@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause(
for (; counter < numOfCols; counter++) { for (; counter < numOfCols; counter++) {
if (counter != count_temp) { if (counter != count_temp) {
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) { if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, ", \'%s\'", pstr += sprintf(pstr, ", \'%s\'",
...@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause( ...@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause(
pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
} }
} else { } else {
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) { if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value); pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value);
...@@ -1050,1902 +1106,2717 @@ static void dumpCreateMTableClause( ...@@ -1050,1902 +1106,2717 @@ static void dumpCreateMTableClause(
free(tmpBuf); free(tmpBuf);
} }
static int convertTbDesToAvroSchema( static int64_t getNtbCountOfStb(char *dbName, char *stbName)
char *dbName, char *tbName, TableDef *tableDes, int colCount,
char **avroSchema)
{ {
errorPrint("%s() LN%d TODO: covert table schema to avro schema\n", TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
__func__, __LINE__); dbName, g_args.port);
// { if (taos == NULL) {
// "namesapce": "database name", errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
// "type": "record",
// "name": "table name",
// "fields": [
// {
// "name": "col0 name",
// "type": "long"
// },
// {
// "name": "col1 name",
// "type": ["int", "null"]
// },
// {
// "name": "col2 name",
// "type": ["float", "null"]
// },
// ...
// {
// "name": "coln name",
// "type": ["string", "null"]
// }
// ]
// }
*avroSchema = (char *)calloc(1,
17 + TSDB_DB_NAME_LEN /* dbname section */
+ 17 /* type: record */
+ 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ 10 /* fields section */
+ (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
if (*avroSchema == NULL) {
errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
return -1; return -1;
} }
char *pstr = *avroSchema; int64_t count = 0;
pstr += sprintf(pstr,
"{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [", char command[COMMAND_SIZE];
dbName, tbName);
for (int i = 0; i < colCount; i ++) { sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
if (0 == i) {
pstr += sprintf(pstr, TAOS_RES *res = taos_query(taos, command);
"{\"name\": \"%s\", \"type\": \"%s\"", int32_t code = taos_errno(res);
tableDes->cols[i].field, "long"); if (code != 0) {
} else { errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
if (strcasecmp(tableDes->cols[i].type, "binary") == 0 || __func__, __LINE__, command, taos_errstr(res));
strcasecmp(tableDes->cols[i].type, "nchar") == 0) { taos_free_result(res);
pstr += sprintf(pstr, taos_close(taos);
"{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", return -1;
tableDes->cols[i].field, "string");
} else {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
tableDes->cols[i].field, tableDes->cols[i].type);
}
}
if ((i != (colCount -1))
&& (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
pstr += sprintf(pstr, "},");
} else {
pstr += sprintf(pstr, "}");
break;
}
} }
pstr += sprintf(pstr, "]}"); TAOS_ROW row = NULL;
debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema); if ((row = taos_fetch_row(res)) != NULL) {
count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
}
return 0; taos_close(taos);
return count;
} }
static int64_t dumpNormalTable( static int getTableDes(
char *dbName, TAOS *taos,
char *stable, char* dbName, char *table,
char *tbName, TableDef *tableDes, bool isSuperTable) {
int precision, TAOS_ROW row = NULL;
FILE *fp TAOS_RES* res = NULL;
) {
int colCount = 0; int colCount = 0;
TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) char sqlstr[COMMAND_SIZE];
+ sizeof(ColDes) * TSDB_MAX_COLUMNS); sprintf(sqlstr, "describe %s.%s;", dbName, table);
if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
colCount = getTableDes(dbName, tbName, tableDes, false);
if (colCount < 0) { res = taos_query(taos, sqlstr);
errorPrint("%s() LN%d, failed to get table[%s] schema\n", int32_t code = taos_errno(res);
__func__, if (code != 0) {
__LINE__, errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
tbName); __func__, __LINE__, sqlstr, taos_errstr(res));
free(tableDes); taos_free_result(res);
return -1; return -1;
} }
// create child-table using super-table TAOS_FIELD *fields = taos_fetch_fields(res);
dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
} else { // dump table definition
colCount = getTableDes(dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create normal-table or super-table tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
dumpCreateTableClause(tableDes, colCount, fp, dbName); while ((row = taos_fetch_row(res)) != NULL) {
tstrncpy(tableDes->cols[colCount].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
min(TSDB_COL_NAME_LEN + 1,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
tstrncpy(tableDes->cols[colCount].type,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
tableDes->cols[colCount].length =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(tableDes->cols[colCount].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(COL_NOTE_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
colCount++;
} }
char *jsonAvroSchema = NULL; taos_free_result(res);
if (g_args.avro) { res = NULL;
if (0 != convertTbDesToAvroSchema(
dbName, tbName, tableDes, colCount, &jsonAvroSchema)) {
errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n",
__func__,
__LINE__);
freeTbDes(tableDes);
return -1;
}
}
int64_t ret = 0; if (isSuperTable) {
if (!g_args.schemaonly) { return colCount;
ret = dumpTableData(fp, tbName, dbName, precision,
jsonAvroSchema);
} }
tfree(jsonAvroSchema); // if child-table have tag, using select tagName from table to get tagValue
freeTbDes(tableDes); for (int i = 0 ; i < colCount; i++) {
return ret; if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
}
static int64_t dumpNormalTableBelongStb(
SDbInfo *dbInfo, char *stbName, char *ntbName)
{
int64_t count = 0;
char tmpBuf[4096] = {0};
FILE *fp = NULL;
if (g_args.outpath[0] != 0) { sprintf(sqlstr, "select %s from %s.%s",
sprintf(tmpBuf, "%s/%s.%s.sql", tableDes->cols[i].field, dbName, table);
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w"); res = taos_query(taos, sqlstr);
if (fp == NULL) { code = taos_errno(res);
errorPrint("%s() LN%d, failed to open file %s\n", if (code != 0) {
__func__, __LINE__, tmpBuf); errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
__func__, __LINE__, sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1; return -1;
} }
count = dumpNormalTable( fields = taos_fetch_fields(res);
dbInfo->name,
stbName,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
fclose(fp); row = taos_fetch_row(res);
return count; if (NULL == row) {
} errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
__func__, __LINE__, sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
}
static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName) if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
{ sprintf(tableDes->cols[i].note, "%s", "NUL");
int64_t count = 0; sprintf(tableDes->cols[i].value, "%s", "NULL");
taos_free_result(res);
res = NULL;
continue;
}
char tmpBuf[4096] = {0}; int32_t* length = taos_fetch_lengths(res);
FILE *fp = NULL;
if (g_args.outpath[0] != 0) { //int32_t* length = taos_fetch_lengths(tmpResult);
sprintf(tmpBuf, "%s/%s.%s.sql", switch (fields[0].type) {
g_args.outpath, dbInfo->name, ntbName); case TSDB_DATA_TYPE_BOOL:
sprintf(tableDes->cols[i].value, "%d",
((((int32_t)(*((char *)
row[TSDB_SHOW_TABLES_NAME_INDEX])))==1)
?1:0));
break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(tableDes->cols[i].value, "%d",
*((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_SMALLINT:
sprintf(tableDes->cols[i].value, "%d",
*((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_INT:
sprintf(tableDes->cols[i].value, "%d",
*((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(tableDes->cols[i].value, "%" PRId64 "",
*((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_FLOAT:
sprintf(tableDes->cols[i].value, "%f",
GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_DOUBLE:
sprintf(tableDes->cols[i].value, "%f",
GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_BINARY:
memset(tableDes->cols[i].value, 0,
sizeof(tableDes->cols[i].value));
int len = strlen((char *)row[0]);
// FIXME for long value
if (len < (COL_VALUEBUF_LEN - 2)) {
converStringToReadable(
(char *)row[0],
length[0],
tableDes->cols[i].value,
len);
} else { } else {
sprintf(tmpBuf, "%s.%s.sql", tableDes->cols[i].var_value = calloc(1, len * 2);
dbInfo->name, ntbName); if (tableDes->cols[i].var_value == NULL) {
errorPrint("%s() LN%d, memory alalocation failed!\n",
__func__, __LINE__);
taos_free_result(res);
return -1;
}
converStringToReadable((char *)row[0],
length[0],
(char *)(tableDes->cols[i].var_value), len);
} }
break;
fp = fopen(tmpBuf, "w"); case TSDB_DATA_TYPE_NCHAR:
if (fp == NULL) { {
errorPrint("%s() LN%d, failed to open file %s\n", memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
__func__, __LINE__, tmpBuf); char tbuf[COMMAND_SIZE-2]; // need reserve 2 bytes for ' '
return -1; convertNCharToReadable(
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
length[0], tbuf, COMMAND_SIZE-2);
sprintf(tableDes->cols[i].value, "%s", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
#if 0
if (!g_args.mysqlFlag) {
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
} else {
char buf[64] = "\0";
int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
time_t tt = (time_t)(ts / 1000);
struct tm *ptm = localtime(&tt);
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
}
#endif
break;
default:
break;
} }
count = dumpNormalTable( taos_free_result(res);
dbInfo->name, }
NULL,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
fclose(fp); return colCount;
return count;
} }
static void *dumpNtbOfDb(void *arg) { static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
threadInfo *pThreadInfo = (threadInfo *)arg; FILE *fp, char* dbName) {
int counter = 0;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); int count_temp = 0;
debugPrint("dump table count = \t%"PRId64"\n", char sqlstr[COMMAND_SIZE];
pThreadInfo->tablesOfDumpOut);
FILE *fp = NULL; char* pstr = sqlstr;
char tmpBuf[4096] = {0};
if (g_args.outpath[0] != 0) { pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
sprintf(tmpBuf, "%s/%s.%d.sql", dbName, tableDes->name);
g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%d.sql",
pThreadInfo->dbName, pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w"); for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (fp == NULL) { if (counter == 0) {
errorPrint("%s() LN%d, failed to open file %s\n", pstr += sprintf(pstr, " (%s %s",
__func__, __LINE__, tmpBuf); tableDes->cols[counter].field, tableDes->cols[counter].type);
return NULL; } else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} }
int64_t count; if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
debugPrint("[%d] No.\t%"PRId64" table name: %s\n", pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
pThreadInfo->threadIndex, i,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name);
count = dumpNormalTable(
pThreadInfo->dbName,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name,
pThreadInfo->precision,
fp);
if (count < 0) {
break;
} }
} }
fclose(fp); count_temp = counter;
return NULL;
}
static void *dumpNormalTablesOfStb(void *arg) {
threadInfo *pThreadInfo = (threadInfo *)arg;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut);
char command[COMMAND_SIZE];
sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", for (; counter < numOfCols; counter++) {
pThreadInfo->dbName, pThreadInfo->stbName, if (counter == count_temp) {
pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom); pstr += sprintf(pstr, ") TAGS (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
TAOS_RES *res = taos_query(pThreadInfo->taos, command); if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
int32_t code = taos_errno(res); || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
if (code) { pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", }
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return NULL;
} }
FILE *fp = NULL; pstr += sprintf(pstr, ");");
char tmpBuf[4096] = {0};
if (g_args.outpath[0] != 0) { debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
sprintf(tmpBuf, "%s/%s.%s.%d.sql", return fprintf(fp, "%s\n\n", sqlstr);
g_args.outpath, }
pThreadInfo->dbName,
pThreadInfo->stbName,
pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%s.%d.sql",
pThreadInfo->dbName,
pThreadInfo->stbName,
pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w"); static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp)
{
uint64_t sizeOfTableDes =
(uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
if (fp == NULL) { TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
errorPrint("%s() LN%d, failed to open file %s\n", if (NULL == tableDes) {
__func__, __LINE__, tmpBuf); errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
return NULL; __func__, __LINE__, sizeOfTableDes);
exit(-1);
} }
TAOS_ROW row = NULL; int colCount = getTableDes(taos, dbInfo->name,
int64_t i = 0; stbName, tableDes, true);
int64_t count;
while((row = taos_fetch_row(res)) != NULL) {
debugPrint("[%d] sub table %"PRId64": name: %s\n",
pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
count = dumpNormalTable( if (colCount < 0) {
pThreadInfo->dbName, free(tableDes);
pThreadInfo->stbName, errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX], __func__, __LINE__, stbName);
pThreadInfo->precision, exit(-1);
fp);
if (count < 0) {
break;
}
} }
fclose(fp); dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
return NULL; free(tableDes);
return 0;
} }
static int64_t dumpNtbOfDbByThreads( static int64_t dumpCreateSTableClauseOfDb(
SDbInfo *dbInfo, SDbInfo *dbInfo, FILE *fp)
int64_t ntbCount)
{ {
if (ntbCount <= 0) { TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0; return 0;
} }
int threads = g_args.thread_num; TAOS_ROW row;
char command[COMMAND_SIZE] = {0};
int64_t a = ntbCount / threads; sprintf(command, "SHOW %s.STABLES", dbInfo->name);
if (a < 1) {
threads = ntbCount;
a = 1;
}
assert(threads); TAOS_RES* res = taos_query(taos, command);
int64_t b = ntbCount % threads; int32_t code = taos_errno(res);
if (code != 0) {
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
exit(-1);
}
threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); int64_t superTblCnt = 0;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); while ((row = taos_fetch_row(res)) != NULL) {
assert(pids); if (0 == dumpStableClasuse(taos, dbInfo,
assert(infos); row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
superTblCnt ++;
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->taos = taos_connect(
g_args.host,
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
return -1;
} }
pThreadInfo->threadIndex = i;
pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
pThreadInfo->tableFrom = (i==0)?0:
((threadInfo *)(infos + i - 1))->tableFrom +
((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
} }
for (int64_t i = 0; i < threads; i++) { taos_free_result(res);
pthread_join(pids[i], NULL);
}
for (int64_t i = 0; i < threads; i++) { fprintf(g_fpOfResult,
threadInfo *pThreadInfo = infos + i; "# super table counter: %"PRId64"\n",
taos_close(pThreadInfo->taos); superTblCnt);
} g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
free(pids); taos_close(taos);
free(infos);
return 0; return superTblCnt;
} }
static int64_t getNtbCountOfStb(char *dbName, char *stbName) static void dumpCreateDbClause(
{ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, char sqlstr[TSDB_MAX_SQL_LEN] = {0};
dbName, g_args.port);
if (taos == NULL) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
return -1;
}
int64_t count = 0; char *pstr = sqlstr;
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
"REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
dbInfo->replica, dbInfo->quorum, dbInfo->days,
dbInfo->keeplist,
dbInfo->cache,
dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
dbInfo->fsync,
dbInfo->cachelast,
dbInfo->comp, dbInfo->precision, dbInfo->update);
}
char command[COMMAND_SIZE]; pstr += sprintf(pstr, ";");
fprintf(fp, "%s\n\n", sqlstr);
}
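/* Illustrative output (values are placeholders, not from the patch): when properties are
 * dumped, dumpCreateDbClause() writes one line per database along the lines of
 *   CREATE DATABASE IF NOT EXISTS db1 REPLICA 1 QUORUM 1 DAYS 10 KEEP 3650 CACHE 16
 *   BLOCKS 6 MINROWS 100 MAXROWS 4096 FSYNC 3000 CACHELAST 0 COMP 2 PRECISION 'ms' UPDATE 0;
 * with every value read back from the SDbInfo fields filled in by "show databases". */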
sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); static FILE* openDumpInFile(char *fptr) {
wordexp_t full_path;
TAOS_RES *res = taos_query(taos, command); if (wordexp(fptr, &full_path, 0) != 0) {
int32_t code = taos_errno(res); errorPrint("illegal file name: %s\n", fptr);
if (code != 0) { return NULL;
errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
} }
TAOS_ROW row = NULL; char *fname = full_path.we_wordv[0];
if ((row = taos_fetch_row(res)) != NULL) { FILE *f = NULL;
count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; if ((fname) && (strlen(fname) > 0)) {
f = fopen(fname, "r");
if (f == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, fname);
}
} }
taos_close(taos); wordfree(&full_path);
return count; return f;
} }
static int64_t dumpNtbOfStbByThreads( static uint64_t getFilesNum(char *ext)
SDbInfo *dbInfo, char *stbName)
{ {
int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); uint64_t count = 0;
if (ntbCount <= 0) {
return 0;
}
int threads = g_args.thread_num;
int64_t a = ntbCount / threads; int namelen, extlen;
if (a < 1) { struct dirent *pDirent;
threads = ntbCount; DIR *pDir;
a = 1;
}
assert(threads); extlen = strlen(ext);
int64_t b = ntbCount % threads;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); bool isSql = (0 == strcmp(ext, "sql"));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
assert(pids);
assert(infos);
for (int64_t i = 0; i < threads; i++) { pDir = opendir(g_args.inpath);
threadInfo *pThreadInfo = infos + i; if (pDir != NULL) {
pThreadInfo->taos = taos_connect( while ((pDirent = readdir(pDir)) != NULL) {
g_args.host, namelen = strlen (pDirent->d_name);
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
return -1; if (namelen > extlen) {
if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
if (isSql) {
if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
continue;
} }
pThreadInfo->threadIndex = i;
pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
pThreadInfo->tableFrom = (i==0)?0:
((threadInfo *)(infos + i - 1))->tableFrom +
((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
strcpy(pThreadInfo->stbName, stbName);
pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
} }
verbosePrint("%s found\n", pDirent->d_name);
for (int64_t i = 0; i < threads; i++) { count ++;
pthread_join(pids[i], NULL);
} }
int64_t records = 0;
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
records += pThreadInfo->rowsOfDumpOut;
taos_close(pThreadInfo->taos);
} }
free(pids);
free(infos);
return records;
}
static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp)
{
uint64_t sizeOfTableDes =
(uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
if (NULL == tableDes) {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
__func__, __LINE__, sizeOfTableDes);
exit(-1);
} }
closedir (pDir);
int colCount = getTableDes(dbInfo->name,
stbName, tableDes, true);
if (colCount < 0) {
free(tableDes);
errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
__func__, __LINE__, stbName);
exit(-1);
} }
dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name); debugPrint("%"PRId64" .%s files found!\n", count, ext);
free(tableDes); return count;
return 0;
} }
static int64_t dumpCreateSTableClauseOfDb( static void freeFileList(char **fileList, int64_t count)
SDbInfo *dbInfo, FILE *fp)
{ {
TAOS *taos = taos_connect(g_args.host, for (int64_t i = 0; i < count; i++) {
g_args.user, g_args.password, dbInfo->name, g_args.port); tfree(fileList[i]);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0;
} }
tfree(fileList);
}
TAOS_ROW row; static void createDumpinList(char *ext, int64_t count)
char command[COMMAND_SIZE] = {0}; {
bool isSql = (0 == strcmp(ext, "sql"));
sprintf(command, "SHOW %s.STABLES", dbInfo->name); if (isSql) {
g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *));
assert(g_tsDumpInSqlFiles);
TAOS_RES* res = taos_query(taos, command); for (int64_t i = 0; i < count; i++) {
int32_t code = taos_errno(res); g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
if (code != 0) { assert(g_tsDumpInSqlFiles[i]);
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", }
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
exit(-1);
} }
#ifdef AVRO_SUPPORT
else {
g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *));
assert(g_tsDumpInAvroFiles);
int64_t superTblCnt = 0; for (int64_t i = 0; i < count; i++) {
while ((row = taos_fetch_row(res)) != NULL) { g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { assert(g_tsDumpInAvroFiles[i]);
superTblCnt ++;
} }
} }
#endif
taos_free_result(res); int namelen, extlen;
struct dirent *pDirent;
DIR *pDir;
fprintf(g_fpOfResult, extlen = strlen(ext);
"# super table counter: %"PRId64"\n",
superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
taos_close(taos); count = 0;
pDir = opendir(g_args.inpath);
if (pDir != NULL) {
while ((pDirent = readdir(pDir)) != NULL) {
namelen = strlen (pDirent->d_name);
if (namelen > extlen) {
if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
verbosePrint("%s found\n", pDirent->d_name);
if (isSql) {
if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
continue;
}
strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
}
#ifdef AVRO_SUPPORT
else {
strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
}
#endif
}
}
}
closedir (pDir);
}
return superTblCnt; debugPrint("%"PRId64" .%s files filled to list!\n", count, ext);
} }
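/* Hedged sketch of the intended dump-in call sequence, inferred from the helpers above
 * (not a verbatim excerpt of the patch): the file-list helpers are used in pairs, first
 * counting then filling, and released after the files have been replayed:
 *   uint64_t sqlFileCount = getFilesNum("sql");
 *   createDumpinList("sql", sqlFileCount);
 *   // ... iterate g_tsDumpInSqlFiles[0 .. sqlFileCount-1] ...
 *   freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
 * The same pattern would apply to "avro" files via g_tsDumpInAvroFiles when AVRO_SUPPORT
 * is compiled in. */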
static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) #ifdef AVRO_SUPPORT
{
TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0;
}
char command[COMMAND_SIZE];
TAOS_RES *result;
int32_t code;
sprintf(command, "USE %s", dbInfo->name); static int convertTbDesToJson(
result = taos_query(taos, command); char *dbName, char *tbName, TableDef *tableDes, int colCount,
code = taos_errno(result); char **jsonSchema)
if (code != 0) { {
errorPrint("invalid database %s, reason: %s\n", // {
dbInfo->name, taos_errstr(result)); // "type": "record",
taos_close(taos); // "name": "dbname.tbname",
return 0; // "fields": [
// {
// "name": "col0 name",
// "type": "long"
// },
// {
// "name": "col1 name",
// "type": "int"
// },
// {
// "name": "col2 name",
// "type": "float"
// },
// {
// "name": "col3 name",
// "type": "boolean"
// },
// ...
// {
// "name": "coln name",
// "type": "string"
// }
// ]
// }
*jsonSchema = (char *)calloc(1,
17 + TSDB_DB_NAME_LEN /* dbname section */
+ 17 /* type: record */
+ 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ 10 /* fields section */
+ (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
if (*jsonSchema == NULL) {
errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
return -1;
} }
sprintf(command, "SHOW TABLES"); char *pstr = *jsonSchema;
result = taos_query(taos, command); pstr += sprintf(pstr,
code = taos_errno(result); "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [",
if (code != 0) { dbName, tbName);
errorPrint("Failed to show %s\'s tables, reason: %s\n", for (int i = 0; i < colCount; i ++) {
dbInfo->name, taos_errstr(result)); if (0 == i) {
taos_close(taos); pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else {
if (strcasecmp(tableDes->cols[i].type, "binary") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "string");
} else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "bytes");
} else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "boolean");
} else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "int");
} else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "int");
} else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field,
strtolower(tableDes->cols[i].type, tableDes->cols[i].type));
}
}
if ((i != (colCount -1))
&& (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
pstr += sprintf(pstr, "},");
} else {
pstr += sprintf(pstr, "}");
break;
}
}
pstr += sprintf(pstr, "]}");
debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema);
return 0; return 0;
}
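/* Worked example (hypothetical table, not from the patch): for a table "power.meters"
 * described as (ts TIMESTAMP, current FLOAT, voltage INT, phase SMALLINT,
 * location BINARY(24)), convertTbDesToJson() produces a schema along the lines of
 *   {"type": "record", "name": "power.meters", "fields": [
 *       {"name": "ts", "type": "long"},
 *       {"name": "current", "type": "float"},
 *       {"name": "voltage", "type": "int"},
 *       {"name": "phase", "type": "int"},
 *       {"name": "location", "type": "string"}]}
 * i.e. the first column is always emitted as Avro long, tinyint/smallint widen to int,
 * binary maps to string, nchar to bytes, and other type names are passed through
 * lower-cased. */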
static void print_json_indent(int indent) {
int i;
for (i = 0; i < indent; i++) {
putchar(' ');
} }
}
g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); const char *json_plural(size_t count) { return count == 1 ? "" : "s"; }
TAOS_ROW row; static void print_json_object(json_t *element, int indent) {
int64_t count = 0; size_t size;
while(NULL != (row = taos_fetch_row(result))) { const char *key;
debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", json_t *value;
__func__, __LINE__,
count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); print_json_indent(indent);
tstrncpy(((TableInfo *)(g_tablesList + count))->name, size = json_object_size(element);
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size));
if (stbName) { json_object_foreach(element, key, value) {
tstrncpy(((TableInfo *)(g_tablesList + count))->stable, print_json_indent(indent + 2);
(char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); printf("JSON Key: \"%s\"\n", key);
((TableInfo *)(g_tablesList + count))->belongStb = true; print_json_aux(value, indent + 2);
} }
count ++; }
static void print_json_array(json_t *element, int indent) {
size_t i;
size_t size = json_array_size(element);
print_json_indent(indent);
printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size));
for (i = 0; i < size; i++) {
print_json_aux(json_array_get(element, i), indent + 2);
} }
taos_close(taos); }
int64_t records = dumpNtbOfDbByThreads(dbInfo, count); static void print_json_string(json_t *element, int indent) {
print_json_indent(indent);
printf("JSON String: \"%s\"\n", json_string_value(element));
}
free(g_tablesList); static void print_json_integer(json_t *element, int indent) {
g_tablesList = NULL; print_json_indent(indent);
printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element));
}
return records; static void print_json_real(json_t *element, int indent) {
print_json_indent(indent);
printf("JSON Real: %f\n", json_real_value(element));
} }
static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp) static void print_json_true(json_t *element, int indent) {
(void)element;
print_json_indent(indent);
printf("JSON True\n");
}
static void print_json_false(json_t *element, int indent) {
(void)element;
print_json_indent(indent);
printf("JSON False\n");
}
static void print_json_null(json_t *element, int indent) {
(void)element;
print_json_indent(indent);
printf("JSON Null\n");
}
static void print_json_aux(json_t *element, int indent)
{ {
dumpCreateDbClause(dbInfo, g_args.with_property, fp); switch(json_typeof(element)) {
case JSON_OBJECT:
print_json_object(element, indent);
break;
fprintf(g_fpOfResult, "\n#### database: %s\n", case JSON_ARRAY:
dbInfo->name); print_json_array(element, indent);
g_resultStatistics.totalDatabasesOfDumpOut++; break;
dumpCreateSTableClauseOfDb(dbInfo, fp); case JSON_STRING:
print_json_string(element, indent);
break;
return dumpNTablesOfDb(dbInfo); case JSON_INTEGER:
print_json_integer(element, indent);
break;
case JSON_REAL:
print_json_real(element, indent);
break;
case JSON_TRUE:
print_json_true(element, indent);
break;
case JSON_FALSE:
print_json_false(element, indent);
break;
case JSON_NULL:
print_json_null(element, indent);
break;
default:
fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element));
}
} }
static int dumpOut() { static void print_json(json_t *root) { print_json_aux(root, 0); }
TAOS *taos = NULL;
TAOS_RES *result = NULL;
TAOS_ROW row; static json_t *load_json(char *jsonbuf)
FILE *fp = NULL; {
int32_t count = 0; json_t *root;
json_error_t error;
char tmpBuf[4096] = {0}; root = json_loads(jsonbuf, 0, &error);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); if (root) {
return root;
} else { } else {
sprintf(tmpBuf, "dbs.sql"); fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
return NULL;
} }
}
fp = fopen(tmpBuf, "w"); static RecordSchema *parse_json_to_recordschema(json_t *element)
if (fp == NULL) { {
errorPrint("%s() LN%d, failed to open file %s\n", RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
__func__, __LINE__, tmpBuf); assert(recordSchema);
return -1;
if (JSON_OBJECT != json_typeof(element)) {
fprintf(stderr, "%s() LN%d, json passed is not an object\n",
__func__, __LINE__);
return NULL;
} }
g_args.dumpDbCount = getDumpDbCount(); const char *key;
debugPrint("%s() LN%d, dump db count: %d\n", json_t *value;
__func__, __LINE__, g_args.dumpDbCount);
if (0 == g_args.dumpDbCount) { json_object_foreach(element, key, value) {
errorPrint("%d databases valid to dump\n", g_args.dumpDbCount); if (0 == strcmp(key, "name")) {
fclose(fp); tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
return -1; } else if (0 == strcmp(key, "fields")) {
} if (JSON_ARRAY == json_typeof(value)) {
g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *)); size_t i;
if (g_dbInfos == NULL) { size_t size = json_array_size(value);
errorPrint("%s() LN%d, failed to allocate memory\n",
verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
__func__, __LINE__,
(long long)size, json_plural(size));
recordSchema->num_fields = size;
recordSchema->fields = malloc(sizeof(FieldStruct) * size);
assert(recordSchema->fields);
for (i = 0; i < size; i++) {
FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
json_t *arr_element = json_array_get(value, i);
const char *ele_key;
json_t *ele_value;
json_object_foreach(arr_element, ele_key, ele_value) {
if (0 == strcmp(ele_key, "name")) {
tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
} else if (0 == strcmp(ele_key, "type")) {
if (JSON_STRING == json_typeof(ele_value)) {
tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
} else if (JSON_OBJECT == json_typeof(ele_value)) {
const char *obj_key;
json_t *obj_value;
json_object_foreach(ele_value, obj_key, obj_value) {
if (0 == strcmp(obj_key, "type")) {
if (JSON_STRING == json_typeof(obj_value)) {
tstrncpy(field->type,
json_string_value(obj_value), TYPE_NAME_LEN-1);
}
}
}
}
}
}
}
            } else {
                fprintf(stderr, "%s() LN%d, fields have no array\n",
                        __func__, __LINE__);
                return NULL;
            }

            break;
        }
    }

    return recordSchema;
}

static void freeRecordSchema(RecordSchema *recordSchema)
{
    if (recordSchema) {
        if (recordSchema->fields) {
            free(recordSchema->fields);
        }
        free(recordSchema);
    }
}

static int64_t writeResultToAvro(
        char *avroFilename,
        char *jsonSchema,
        TAOS_RES *res)
{
    avro_schema_t schema;
    if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) {
        errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n",
                __func__, __LINE__, jsonSchema, avro_strerror());
        exit(EXIT_FAILURE);
    }

    json_t *json_root = load_json(jsonSchema);
    debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);

    RecordSchema *recordSchema;
    if (json_root) {
        if (g_args.debug_print || g_args.verbose_print) {
            print_json(json_root);
        }

        recordSchema = parse_json_to_recordschema(json_root);
        if (NULL == recordSchema) {
            fprintf(stderr, "Failed to parse json to recordschema\n");
            exit(EXIT_FAILURE);
        }

        json_decref(json_root);
    } else {
        errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema);
        exit(EXIT_FAILURE);
    }

    avro_file_writer_t db;

    int rval = avro_file_writer_create_with_codec
        (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0);
    if (rval) {
        errorPrint("There was an error creating %s. reason: %s\n",
                avroFilename, avro_strerror());
        exit(EXIT_FAILURE);
    }

    TAOS_ROW row = NULL;

    int numFields = taos_field_count(res);
    assert(numFields > 0);
    TAOS_FIELD *fields = taos_fetch_fields(res);

    avro_value_iface_t *wface =
        avro_generic_class_from_schema(schema);

    avro_value_t record;
    avro_generic_value_new(wface, &record);
int64_t count = 0;
while ((row = taos_fetch_row(res)) != NULL) {
avro_value_t value;
for (int col = 0; col < numFields; col++) {
if (0 != avro_value_get_by_name(
&record, fields[col].name, &value, NULL)) {
errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) failed",
__func__, __LINE__, fields[col].name);
continue;
            }

            int len;
            switch (fields[col].type) {
                case TSDB_DATA_TYPE_BOOL:
                    avro_value_set_boolean(&value,
                            ((((int32_t)(*((char *)row[col])))==1)?1:0));
                    break;

                case TSDB_DATA_TYPE_TINYINT:
                    avro_value_set_int(&value, *((int8_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_SMALLINT:
                    avro_value_set_int(&value, *((int16_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_INT:
                    avro_value_set_int(&value, *((int32_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_BIGINT:
                    avro_value_set_long(&value, *((int64_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_FLOAT:
                    avro_value_set_float(&value, GET_FLOAT_VAL(row[col]));
                    break;

                case TSDB_DATA_TYPE_DOUBLE:
                    avro_value_set_double(&value, GET_DOUBLE_VAL(row[col]));
                    break;

                case TSDB_DATA_TYPE_BINARY:
                    avro_value_set_string(&value, (char *)row[col]);
                    break;

                case TSDB_DATA_TYPE_NCHAR:
                    len = strlen((char*)row[col]);
                    avro_value_set_bytes(&value, (void*)(row[col]), len);
                    break;

                case TSDB_DATA_TYPE_TIMESTAMP:
                    avro_value_set_long(&value, *((int64_t *)row[col]));
                    break;

                default:
                    break;
            }
        }

        if (0 != avro_file_writer_append_value(db, &record)) {
            errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n",
                    __func__, __LINE__,
                    avro_strerror());
        } else {
            count ++;
        }
    }

    avro_value_decref(&record);
    avro_value_iface_decref(wface);
    freeRecordSchema(recordSchema);
    avro_file_writer_close(db);
    avro_schema_decref(schema);

    return count;
}

void freeBindArray(char *bindArray, int onlyCol)
{
    TAOS_BIND *bind;

    for (int j = 0; j < onlyCol; j++) {
        bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j));
        if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type)
                && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) {
            tfree(bind->buffer);
        }
    }
}

static int dumpInOneAvroFile(char* fcharset,
        char* encode, char *avroFilepath)
{
    debugPrint("avroFilepath: %s\n", avroFilepath);

    avro_file_reader_t reader;

    if(avro_file_reader(avroFilepath, &reader)) {
        fprintf(stderr, "Unable to open avro file %s: %s\n",
                avroFilepath, avro_strerror());
        return -1;
    }

    int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4;
    char *jsonbuf = calloc(1, buf_len);
    assert(jsonbuf);

    avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);

    avro_schema_t schema;
    schema = avro_file_reader_get_writer_schema(reader);
    avro_schema_to_json(schema, jsonwriter);

    if (0 == strlen(jsonbuf)) {
        errorPrint("Failed to parse avro file: %s schema. reason: %s\n",
                avroFilepath, avro_strerror());
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }
    debugPrint("Schema:\n  %s\n", jsonbuf);

    json_t *json_root = load_json(jsonbuf);
    debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
    if (g_args.debug_print) {
        print_json(json_root);
    }

    const char *namespace = avro_schema_namespace((const avro_schema_t)schema);
    debugPrint("Namespace: %s\n", namespace);

    TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
            namespace, g_args.port);
    if (taos == NULL) {
        errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
        return -1;
    }

    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (NULL == stmt) {
        taos_close(taos);
        errorPrint("%s() LN%d, stmt init failed! reason: %s\n",
                __func__, __LINE__, taos_errstr(NULL));
        return -1;
    }

    RecordSchema *recordSchema = parse_json_to_recordschema(json_root);
    if (NULL == recordSchema) {
        errorPrint("Failed to parse json to recordschema. reason: %s\n",
                avro_strerror());
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }
    json_decref(json_root);

    TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
            + sizeof(ColDes) * TSDB_MAX_COLUMNS);

    int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false);

    if (allColCount < 0) {
        errorPrint("%s() LN%d, failed to get table[%s] schema\n",
                __func__,
                __LINE__,
                recordSchema->name);
        free(tableDes);
        freeRecordSchema(recordSchema);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
    assert(stmtBuffer);

    char *pstr = stmtBuffer;
    pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");

    int onlyCol = 1; // at least timestamp
    for (int col = 1; col < allColCount; col++) {
        if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue;
        pstr += sprintf(pstr, ",?");
        onlyCol ++;
    }
    pstr += sprintf(pstr, ")");

    if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) {
        errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n",
                taos_stmt_errstr(stmt));

        free(stmtBuffer);
        free(tableDes);
        freeRecordSchema(recordSchema);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) {
        errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n",
                recordSchema->name, taos_stmt_errstr(stmt));

        free(stmtBuffer);
        free(tableDes);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    avro_value_iface_t *value_class = avro_generic_class_from_schema(schema);
    avro_value_t value;
    avro_generic_value_new(value_class, &value);

    char *bindArray =
            malloc(sizeof(TAOS_BIND) * onlyCol);
    assert(bindArray);

    int success = 0;
    int failed = 0;
    while(!avro_file_reader_read_value(reader, &value)) {
memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol);
TAOS_BIND *bind;
for (int i = 0; i < recordSchema->num_fields; i++) {
bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
avro_value_t field_value;
FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
if (0 == i) {
int64_t *ts = malloc(sizeof(int64_t));
assert(ts);
avro_value_get_by_name(&value, field->name, &field_value, NULL);
avro_value_get_long(&field_value, ts);
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
bind->buffer_length = sizeof(int64_t);
bind->buffer = ts;
bind->length = &bind->buffer_length;
bind->is_null = NULL;
} else if (0 == avro_value_get_by_name(
&value, field->name, &field_value, NULL)) {
if (0 == strcasecmp(tableDes->cols[i].type, "int")) {
int32_t *n32 = malloc(sizeof(int32_t));
assert(n32);
avro_value_get_int(&field_value, n32);
debugPrint("%d | ", *n32);
bind->buffer_type = TSDB_DATA_TYPE_INT;
bind->buffer_length = sizeof(int32_t);
bind->buffer = n32;
} else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) {
int32_t *n8 = malloc(sizeof(int32_t));
assert(n8);
avro_value_get_int(&field_value, n8);
debugPrint("%d | ", *n8);
bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
bind->buffer_length = sizeof(int8_t);
bind->buffer = (int8_t *)n8;
} else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) {
int32_t *n16 = malloc(sizeof(int32_t));
assert(n16);
avro_value_get_int(&field_value, n16);
debugPrint("%d | ", *n16);
bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
bind->buffer_length = sizeof(int16_t);
bind->buffer = (int32_t*)n16;
} else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) {
int64_t *n64 = malloc(sizeof(int64_t));
assert(n64);
avro_value_get_long(&field_value, n64);
debugPrint("%"PRId64" | ", *n64);
bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
bind->buffer_length = sizeof(int64_t);
bind->buffer = n64;
} else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) {
int64_t *n64 = malloc(sizeof(int64_t));
assert(n64);
avro_value_get_long(&field_value, n64);
debugPrint("%"PRId64" | ", *n64);
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
bind->buffer_length = sizeof(int64_t);
bind->buffer = n64;
} else if (0 == strcasecmp(tableDes->cols[i].type, "float")) {
float *f = malloc(sizeof(float));
assert(f);
avro_value_get_float(&field_value, f);
debugPrint("%f | ", *f);
bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
bind->buffer_length = sizeof(float);
bind->buffer = f;
} else if (0 == strcasecmp(tableDes->cols[i].type, "double")) {
double *dbl = malloc(sizeof(double));
assert(dbl);
avro_value_get_double(&field_value, dbl);
debugPrint("%f | ", *dbl);
bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
bind->buffer_length = sizeof(double);
bind->buffer = dbl;
} else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
size_t size;
char *buf = NULL;
avro_value_get_string(&field_value, (const char **)&buf, &size);
debugPrint("%s | ", (char *)buf);
bind->buffer_type = TSDB_DATA_TYPE_BINARY;
bind->buffer_length = tableDes->cols[i].length;
bind->buffer = buf;
} else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
size_t bytessize;
void *bytesbuf = NULL;
avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
debugPrint("%s | ", (char*)bytesbuf);
bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
bind->buffer_length = tableDes->cols[i].length;
bind->buffer = bytesbuf;
} else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
int32_t *bl = malloc(sizeof(int32_t));
assert(bl);
avro_value_get_boolean(&field_value, bl);
debugPrint("%s | ", (*bl)?"true":"false");
bind->buffer_type = TSDB_DATA_TYPE_BOOL;
bind->buffer_length = sizeof(int8_t);
bind->buffer = (int8_t*)bl;
}
bind->length = &bind->buffer_length;
bind->is_null = NULL;
}
}
debugPrint("%s", "\n");
if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
freeBindArray(bindArray, onlyCol);
failed --;
continue;
}
if (0 != taos_stmt_add_batch(stmt)) {
            errorPrint("%s() LN%d stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
freeBindArray(bindArray, onlyCol);
failed --;
continue;
        }

        freeBindArray(bindArray, onlyCol);

        success ++;
        continue;
    }

    if (0 != taos_stmt_execute(stmt)) {
        errorPrint("%s() LN%d stmt_execute() failed! reason: %s\n",
                __func__, __LINE__, taos_stmt_errstr(stmt));
        failed = success;
    }

    avro_value_decref(&value);
    avro_value_iface_decref(value_class);
    tfree(bindArray);
    tfree(stmtBuffer);
    tfree(tableDes);
    freeRecordSchema(recordSchema);
    avro_schema_decref(schema);
    avro_file_reader_close(reader);
    avro_writer_free(jsonwriter);
    tfree(jsonbuf);
    taos_stmt_close(stmt);
    taos_close(taos);

    if (failed < 0)
        return failed;
    return success;
}
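// Worker thread for dump-in: replays its assigned slice of .avro files back into TDengine.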
static void* dumpInAvroWorkThreadFp(void *arg)
{
threadInfo *pThread = (threadInfo*)arg;
setThreadName("dumpInAvroWorkThrd");
verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
pThread->threadIndex, pThread->count, pThread->from);
for (int64_t i = 0; i < pThread->count; i++) {
char avroFile[MAX_PATH_LEN];
sprintf(avroFile, "%s/%s", g_args.inpath,
g_tsDumpInAvroFiles[pThread->from + i]);
if (0 == dumpInOneAvroFile(g_tsCharset,
g_args.encode,
avroFile)) {
okPrint("[%d] Success dump in file: %s\n",
pThread->threadIndex, avroFile);
}
}
return NULL;
}
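// Spread the .avro files across g_args.thread_num threads: each thread gets
// avroFileCount / threads files, and the first (avroFileCount % threads) threads
// take one extra file each.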
static int64_t dumpInAvroWorkThreads()
{
int64_t ret = 0;
int32_t threads = g_args.thread_num;
uint64_t avroFileCount = getFilesNum("avro");
if (0 == avroFileCount) {
debugPrint("No .avro file found in %s\n", g_args.inpath);
return 0;
}
createDumpinList("avro", avroFileCount);
threadInfo *pThread;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = (threadInfo *)calloc(
threads, sizeof(threadInfo));
assert(pids);
assert(infos);
int64_t a = avroFileCount / threads;
if (a < 1) {
threads = avroFileCount;
a = 1;
}
int64_t b = 0;
if (threads != 0) {
b = avroFileCount % threads;
}
int64_t from = 0;
for (int32_t t = 0; t < threads; ++t) {
pThread = infos + t;
pThread->threadIndex = t;
pThread->from = from;
pThread->count = t<b?a+1:a;
from += pThread->count;
verbosePrint(
"Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
t, pThread->count, pThread->from);
if (pthread_create(pids + t, NULL,
dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
errorPrint("%s() LN%d, thread[%d] failed to start\n",
__func__, __LINE__, pThread->threadIndex);
exit(EXIT_FAILURE);
}
}
for (int t = 0; t < threads; ++t) {
pthread_join(pids[t], NULL);
}
free(infos);
free(pids);
freeFileList(g_tsDumpInAvroFiles, avroFileCount);
return ret;
}
#endif /* AVRO_SUPPORT */
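// Convert a query result set into batched INSERT statements written to the dump-out
// SQL file; a batch is terminated with ";\n" every g_args.data_batch rows or when the
// SQL buffer is close to full.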
static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
{
int64_t totalRows = 0;
int32_t sql_buf_len = g_args.max_sql_len;
char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
if (tmpBuffer == NULL) {
errorPrint("failed to allocate %d memory\n", sql_buf_len + 128);
return -1;
}
char *pstr = tmpBuffer;
TAOS_ROW row = NULL;
int rowFlag = 0;
int64_t lastRowsPrint = 5000000;
int count = 0;
int numFields = taos_field_count(res);
assert(numFields > 0);
TAOS_FIELD *fields = taos_fetch_fields(res);
int32_t curr_sqlstr_len = 0;
int32_t total_sqlstr_len = 0;
while ((row = taos_fetch_row(res)) != NULL) {
curr_sqlstr_len = 0;
int32_t* length = taos_fetch_lengths(res); // act len
if (count == 0) {
total_sqlstr_len = 0;
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"INSERT INTO %s.%s VALUES (", dbName, tbName);
} else {
if (g_args.mysqlFlag) {
if (0 == rowFlag) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
rowFlag++;
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
}
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
}
}
for (int col = 0; col < numFields; col++) {
if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
if (row[col] == NULL) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
continue;
}
switch (fields[col].type) {
                case TSDB_DATA_TYPE_BOOL:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
                            ((((int32_t)(*((char *)row[col])))==1)?1:0));
                    break;

                case TSDB_DATA_TYPE_TINYINT:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
                            *((int8_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_SMALLINT:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
                            *((int16_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_INT:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
                            *((int32_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_BIGINT:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
                            "%" PRId64 "",
                            *((int64_t *)row[col]));
                    break;

                case TSDB_DATA_TYPE_FLOAT:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
                            GET_FLOAT_VAL(row[col]));
                    break;

                case TSDB_DATA_TYPE_DOUBLE:
                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
                            GET_DOUBLE_VAL(row[col]));
                    break;

                case TSDB_DATA_TYPE_BINARY:
                    {
                        char tbuf[COMMAND_SIZE] = {0};
                        converStringToReadable((char *)row[col], length[col],
                                tbuf, COMMAND_SIZE);
                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
                                "\'%s\'", tbuf);
                        break;
                    }
                case TSDB_DATA_TYPE_NCHAR:
                    {
                        char tbuf[COMMAND_SIZE] = {0};
                        convertNCharToReadable((char *)row[col], length[col],
                                tbuf, COMMAND_SIZE);
                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
                                "\'%s\'", tbuf);
                        break;
                    }
                case TSDB_DATA_TYPE_TIMESTAMP:
                    if (!g_args.mysqlFlag) {
                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
                                "%" PRId64 "",
                                *(int64_t *)row[col]);
                    } else {
                        char buf[64] = "\0";
                        int64_t ts = *((int64_t *)row[col]);
                        time_t tt = (time_t)(ts / 1000);
                        struct tm *ptm = localtime(&tt);
                        strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
                                "\'%s.%03d\'",
                                buf, (int)(ts % 1000));
                    }
                    break;

                default:
                    break;
            }
        }

        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");

        totalRows++;
        count++;
        fprintf(fp, "%s", tmpBuffer);

        if (totalRows >= lastRowsPrint) {
            printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
                    totalRows, dbName, tbName);
            lastRowsPrint += 5000000;
        }

        total_sqlstr_len += curr_sqlstr_len;

        if ((count >= g_args.data_batch)
                || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
            fprintf(fp, ";\n");
            count = 0;
        }
    }

    debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);

    fprintf(fp, "\n");
    free(tmpBuffer);

    return totalRows;
}

static int64_t dumpTableData(FILE *fp, char *tbName,
        char* dbName, int precision,
        char *jsonSchema) {
    int64_t totalRows = 0;

    char sqlstr[1024] = {0};

    int64_t start_time, end_time;
    if (strlen(g_args.humanStartTime)) {
        if (TSDB_CODE_SUCCESS != taosParseTime(
                g_args.humanStartTime, &start_time,
                strlen(g_args.humanStartTime),
                precision, 0)) {
            errorPrint("Input %s, time format error!\n",
                    g_args.humanStartTime);
            return -1;
        }
    } else {
        start_time = g_args.start_time;
    }

    if (strlen(g_args.humanEndTime)) {
        if (TSDB_CODE_SUCCESS != taosParseTime(
                g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
                precision, 0)) {
            errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
            return -1;
        }
    } else {
        end_time = g_args.end_time;
    }

    sprintf(sqlstr,
            "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
            dbName, tbName, start_time, end_time);
TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbName, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbName);
return -1;
}
TAOS_RES* res = taos_query(taos, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
errorPrint("failed to run command %s, reason: %s\n",
sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
}
#ifdef AVRO_SUPPORT
if (g_args.avro) {
char avroFilename[MAX_PATH_LEN] = {0};
if (g_args.outpath[0] != 0) {
sprintf(avroFilename, "%s/%s.%s.avro",
g_args.outpath, dbName, tbName);
} else {
sprintf(avroFilename, "%s.%s.avro",
dbName, tbName);
}
totalRows = writeResultToAvro(avroFilename, jsonSchema, res);
} else
#endif
totalRows = writeResultToSql(res, fp, dbName, tbName);
taos_free_result(res);
taos_close(taos);
return totalRows;
}
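// Dump one table: emit its CREATE clause (via its super table when it has one),
// then its data either as Avro or as INSERT statements.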
static int64_t dumpNormalTable(
TAOS *taos,
char *dbName,
char *stable,
char *tbName,
int precision,
FILE *fp
) {
int colCount = 0;
TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
+ sizeof(ColDes) * TSDB_MAX_COLUMNS);
if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
colCount = getTableDes(taos, dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create child-table using super-table
dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
} else { // dump table definition
colCount = getTableDes(taos, dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create normal-table or super-table
dumpCreateTableClause(tableDes, colCount, fp, dbName);
}
char *jsonSchema = NULL;
#ifdef AVRO_SUPPORT
if (g_args.avro) {
if (0 != convertTbDesToJson(
dbName, tbName, tableDes, colCount, &jsonSchema)) {
errorPrint("%s() LN%d, convertTbDesToJson failed\n",
__func__,
__LINE__);
freeTbDes(tableDes);
return -1;
}
}
#endif
int64_t totalRows = 0;
if (!g_args.schemaonly) {
totalRows = dumpTableData(fp, tbName, dbName, precision,
jsonSchema);
}
tfree(jsonSchema);
freeTbDes(tableDes);
return totalRows;
}
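// Dump a normal table that does not belong to any super table into its own .sql file.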
static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName)
{
int64_t count = 0;
char tmpBuf[MAX_PATH_LEN] = {0};
FILE *fp = NULL;
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%s.sql",
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
}
count = dumpNormalTable(
taos,
dbInfo->name,
NULL,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
if (count > 0) {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
fclose(fp);
return count;
}
static int64_t dumpNormalTableBelongStb(
TAOS *taos,
SDbInfo *dbInfo, char *stbName, char *ntbName)
{
int64_t count = 0;
char tmpBuf[MAX_PATH_LEN] = {0};
FILE *fp = NULL;
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%s.sql",
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
}
count = dumpNormalTable(
taos,
dbInfo->name,
stbName,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
if (count > 0) {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
fclose(fp);
return count;
}
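// Worker thread for dump-out: writes its assigned range of tables from g_tablesList
// into one per-thread .sql file.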
static void *dumpNtbOfDb(void *arg) {
threadInfo *pThreadInfo = (threadInfo *)arg;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
debugPrint("dump table count = \t%"PRId64"\n",
pThreadInfo->count);
FILE *fp = NULL;
char tmpBuf[MAX_PATH_LEN] = {0};
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%d.sql",
g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%d.sql",
pThreadInfo->dbName, pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return NULL;
}
int64_t count;
for (int64_t i = 0; i < pThreadInfo->count; i++) {
debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
pThreadInfo->threadIndex, i,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name);
count = dumpNormalTable(
pThreadInfo->taos,
pThreadInfo->dbName,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name,
pThreadInfo->precision,
fp);
if (count < 0) {
break;
} else {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
}
fclose(fp);
return NULL;
}
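// Validate the command-line option combination before any dump work starts.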
static int checkParam() {
if (g_args.all_databases && g_args.databases) {
errorPrint("%s", "conflict option --all-databases and --databases\n");
return -1;
}
if (g_args.start_time > g_args.end_time) {
errorPrint("%s", "start time is larger than end time\n");
return -1;
}
if (g_args.arg_list_len == 0) {
if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
errorPrint("%s", "taosdump requires parameters\n");
return -1;
}
}
/*
if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
fprintf(stderr, "duplicate parameter input and output file path\n");
return -1;
}
*/
if (!g_args.isDumpIn && g_args.encode != NULL) {
fprintf(stderr, "invalid option in dump out\n");
return -1;
}
if (g_args.table_batch <= 0) {
fprintf(stderr, "invalid option in dump out\n");
return -1;
}
return 0;
}
/*
static bool isEmptyCommand(char *cmd) {
char *pchar = cmd;
while (*pchar != '\0') {
if (*pchar != ' ') return false;
pchar++;
}
return true;
}
static void taosReplaceCtrlChar(char *str) {
bool ctrlOn = false;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) {
if (ctrlOn) {
switch (*str) {
case 'n':
*pstr = '\n';
pstr++;
break;
case 'r':
*pstr = '\r';
pstr++;
break;
case 't':
*pstr = '\t';
pstr++;
break;
case '\\':
*pstr = '\\';
pstr++;
break;
case '\'':
*pstr = '\'';
pstr++;
break;
default:
break;
}
ctrlOn = false;
} else {
if (*str == '\\') {
ctrlOn = true;
} else {
*pstr = *str;
pstr++;
}
}
}
*pstr = '\0';
}
*/
char *ascii_literal_list[] = {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
"\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
"\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
"\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
"4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
"u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
"\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
"\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
"\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
"\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
"\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
"\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
"\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
"\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
char *pstr = str;
char *pbuf = buf;
while (size > 0) {
if (*pstr == '\0') break;
pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
pstr++;
size--;
}
*pbuf = '\0';
return 0;
}

static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
    char *pstr = str;
    char *pbuf = buf;
    wchar_t wc;
    while (size > 0) {
        if (*pstr == '\0') break;
        int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
        if (byte_width < 0) {
            errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
            exit(-1);
        }

        if ((int)wc < 256) {
            pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
        } else {
            memcpy(pbuf, pstr, byte_width);
            pbuf += byte_width;
        }
        pstr += byte_width;
    }

    *pbuf = '\0';

    return 0;
}

static void dumpCharset(FILE *fp) {
    char charsetline[256];

    (void)fseek(fp, 0, SEEK_SET);
    sprintf(charsetline, "#!%s\n", tsCharset);
    (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}

static void loadFileCharset(FILE *fp, char *fcharset) {
    char * line = NULL;
    size_t line_size = 0;

    (void)fseek(fp, 0, SEEK_SET);
    ssize_t size = getline(&line, &line_size, fp);
    if (size <= 2) {
        goto _exit_no_charset;
    }

    if (strncmp(line, "#!", 2) != 0) {
        goto _exit_no_charset;
    }
    if (line[size - 1] == '\n') {
        line[size - 1] = '\0';
        size--;
    }
    strcpy(fcharset, line + 2);

    tfree(line);
    return;

_exit_no_charset:
    (void)fseek(fp, 0, SEEK_SET);
    *fcharset = '\0';

    tfree(line);
    return;
}

// ======== dumpIn support multi threads functions ================================//
static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset,
        char* encode, char* fileName) {
    int read_len = 0;
    char * cmd = NULL;
    size_t cmd_len = 0;
    char * line = NULL;
    size_t line_len = 0;

    cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
    if (cmd == NULL) {
        errorPrint("%s() LN%d, failed to allocate memory\n",
                __func__, __LINE__);
        return -1;
    }

    int lastRowsPrint = 5000000;
    int lineNo = 0;
    while ((read_len = getline(&line, &line_len, fp)) != -1) {
        ++lineNo;
        if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
        line[--read_len] = '\0';

        //if (read_len == 0 || isCommentLine(line)) {  // line starts with #
        if (read_len == 0 ) {
            continue;
        }

        if (line[read_len - 1] == '\\') {
            line[read_len - 1] = ' ';
            memcpy(cmd + cmd_len, line, read_len);
            cmd_len += read_len;
            continue;
        }

        memcpy(cmd + cmd_len, line, read_len);
        cmd[read_len + cmd_len]= '\0';
        if (queryDbImpl(taos, cmd)) {
            errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
                    __func__, __LINE__, lineNo, fileName);
            fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
        }

        memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
        cmd_len = 0;

        if (lineNo >= lastRowsPrint) {
            printf(" %d lines already be executed from file %s\n", lineNo, fileName);
            lastRowsPrint += 5000000;
        }
    }

    tfree(cmd);
    tfree(line);
    return 0;
}
static void* dumpInSqlWorkThreadFp(void *arg)
{
    threadInfo *pThread = (threadInfo*)arg;
    setThreadName("dumpInSqlWorkThrd");
    fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
            pThread->threadIndex, pThread->count, pThread->from);

    for (int64_t i = 0; i < pThread->count; i++) {
        char sqlFile[MAX_PATH_LEN];
        sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);

        FILE* fp = openDumpInFile(sqlFile);
        if (NULL == fp) {
            errorPrint("[%d] Failed to open input file: %s\n",
                    pThread->threadIndex, sqlFile);
            continue;
        }

        if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
                sqlFile)) {
            okPrint("[%d] Success dump in file: %s\n",
                    pThread->threadIndex, sqlFile);
        }
        fclose(fp);
    }

    return NULL;
}

static int dumpInSqlWorkThreads()
{
    int32_t threads = g_args.thread_num;

    uint64_t sqlFileCount = getFilesNum("sql");
    if (0 == sqlFileCount) {
        debugPrint("No .sql file found in %s\n", g_args.inpath);
        return 0;
    }

    createDumpinList("sql", sqlFileCount);

    threadInfo *pThread;

    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
    threadInfo *infos = (threadInfo *)calloc(
            threads, sizeof(threadInfo));
    assert(pids);
    assert(infos);

    int64_t a = sqlFileCount / threads;
    if (a < 1) {
        threads = sqlFileCount;
        a = 1;
    }

    int64_t b = 0;
    if (threads != 0) {
        b = sqlFileCount % threads;
    }

    int64_t from = 0;

    for (int32_t t = 0; t < threads; ++t) {
        pThread = infos + t;
        pThread->threadIndex = t;

        pThread->from = from;
        pThread->count = t<b?a+1:a;
        from += pThread->count;
        verbosePrint(
                "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n",
                t, pThread->count, pThread->from);

        pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
                NULL, g_args.port);
        if (pThread->taos == NULL) {
            errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
            free(infos);
            free(pids);
            return -1;
        }

        if (pthread_create(pids + t, NULL,
                dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
            errorPrint("%s() LN%d, thread[%d] failed to start\n",
                    __func__, __LINE__, pThread->threadIndex);
            exit(EXIT_FAILURE);
        }
    }

    for (int t = 0; t < threads; ++t) {
        pthread_join(pids[t], NULL);
    }

    for (int t = 0; t < threads; ++t) {
        taos_close(infos[t].taos);
    }
    free(infos);
    free(pids);

    freeFileList(g_tsDumpInSqlFiles, sqlFileCount);

    return 0;
}

static int dumpInDbs()
{
    TAOS *taos = taos_connect(
            g_args.host, g_args.user, g_args.password,
            NULL, g_args.port);

    if (taos == NULL) {
        errorPrint("%s() LN%d, failed to connect to TDengine server\n",
                __func__, __LINE__);
        return -1;
    }

    char dbsSql[MAX_PATH_LEN];
    sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");

    FILE *fp = openDumpInFile(dbsSql);
    if (NULL == fp) {
        errorPrint("%s() LN%d, failed to open input file %s\n",
                __func__, __LINE__, dbsSql);
        return -1;
    }
    debugPrint("Success Open input file: %s\n", dbsSql);
    loadFileCharset(fp, g_tsCharset);

    if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
        okPrint("Success dump in file: %s !\n", dbsSql);
    }

    fclose(fp);
    taos_close(taos);

    return 0;
}

static int64_t dumpIn() {
    assert(g_args.isDumpIn);

    int64_t ret = 0;
    if (dumpInDbs()) {
        errorPrint("%s", "Failed to dump dbs in!\n");
        exit(EXIT_FAILURE);
    }

    ret = dumpInSqlWorkThreads();

#ifdef AVRO_SUPPORT
    if (0 == ret) {
        ret = dumpInAvroWorkThreads();
    }
#endif

    return ret;
}
static void taosReplaceCtrlChar(char *str) { static void *dumpNormalTablesOfStb(void *arg) {
bool ctrlOn = false; threadInfo *pThreadInfo = (threadInfo *)arg;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) { debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
if (ctrlOn) { debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count);
switch (*str) {
case 'n': char command[COMMAND_SIZE];
*pstr = '\n';
pstr++; sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
break; pThreadInfo->dbName, pThreadInfo->stbName,
case 'r': pThreadInfo->count, pThreadInfo->from);
*pstr = '\r';
pstr++; TAOS_RES *res = taos_query(pThreadInfo->taos, command);
break; int32_t code = taos_errno(res);
case 't': if (code) {
*pstr = '\t'; errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
pstr++; __func__, __LINE__, command, taos_errstr(res));
break; taos_free_result(res);
case '\\': return NULL;
*pstr = '\\';
pstr++;
break;
case '\'':
*pstr = '\'';
pstr++;
break;
default:
break;
} }
ctrlOn = false;
} else { FILE *fp = NULL;
if (*str == '\\') { char tmpBuf[MAX_PATH_LEN] = {0};
ctrlOn = true;
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%s.%d.sql",
g_args.outpath,
pThreadInfo->dbName,
pThreadInfo->stbName,
pThreadInfo->threadIndex);
} else { } else {
*pstr = *str; sprintf(tmpBuf, "%s.%s.%d.sql",
pstr++; pThreadInfo->dbName,
} pThreadInfo->stbName,
} pThreadInfo->threadIndex);
} }
*pstr = '\0'; fp = fopen(tmpBuf, "w");
}
*/
char *ascii_literal_list[] = { if (fp == NULL) {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", errorPrint("%s() LN%d, failed to open file %s\n",
"\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", __func__, __LINE__, tmpBuf);
"\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", return NULL;
"\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", }
"4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
"u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
"\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
"\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
"\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
"\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
"\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
"\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
"\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
"\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
    char *pstr = str;
    char *pbuf = buf;
    while (size > 0) {
        if (*pstr == '\0') break;
        pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
        pstr++;
        size--;
    }
    *pbuf = '\0';
    return 0;
}

static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
    char *pstr = str;
    char *pbuf = buf;
    wchar_t wc;
    while (size > 0) {
        if (*pstr == '\0') break;
        int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
        if (byte_width < 0) {
            errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
            exit(-1);
        }
        if ((int)wc < 256) {
            pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
        } else {
            memcpy(pbuf, pstr, byte_width);
            pbuf += byte_width;
        }
        pstr += byte_width;
    }
    *pbuf = '\0';
    return 0;
}

static void dumpCharset(FILE *fp) {
    char charsetline[256];
    (void)fseek(fp, 0, SEEK_SET);
    sprintf(charsetline, "#!%s\n", tsCharset);
    (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}

static void loadFileCharset(FILE *fp, char *fcharset) {
    char  *line = NULL;
    size_t line_size = 0;

    (void)fseek(fp, 0, SEEK_SET);
    ssize_t size = getline(&line, &line_size, fp);
    if (size <= 2) {
        goto _exit_no_charset;
    }
    if (strncmp(line, "#!", 2) != 0) {
        goto _exit_no_charset;
    }
    if (line[size - 1] == '\n') {
        line[size - 1] = '\0';
        size--;
    }
    strcpy(fcharset, line + 2);
    tfree(line);
    return;

_exit_no_charset:
    (void)fseek(fp, 0, SEEK_SET);
    *fcharset = '\0';
    tfree(line);
    return;
}

// ======== dumpIn support multi threads functions ================================//
static char    **g_tsDumpInSqlFiles = NULL;
static int32_t   g_tsSqlFileNum = 0;
static char      g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
static char      g_tsCharset[64] = {0};

static int taosGetFilesNum(const char *directoryName, const char *prefix, const char *prefix2)
{
    char cmd[1024] = { 0 };

    if (prefix2)
        sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ", directoryName, prefix, directoryName, prefix2);
    else
        sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);

    FILE *fp = popen(cmd, "r");
    if (fp == NULL) {
        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
        exit(-1);
    }

    int fileNum = 0;
    if (fscanf(fp, "%d", &fileNum) != 1) {
        errorPrint("failed to execute:%s, parse result error\n", cmd);
        exit(-1);
    }

    if (fileNum <= 0) {
        errorPrint("directory:%s is empty\n", directoryName);
        exit(-1);
    }

    pclose(fp);
    return fileNum;
}

static void taosParseDirectory(const char *directoryName, const char *prefix, const char *prefix2,
        char **fileArray, int totalFiles)
{
    char cmd[1024] = { 0 };

    if (prefix2) {
        sprintf(cmd, "ls %s/*.%s %s/*.%s | sort", directoryName, prefix, directoryName, prefix2);
    } else {
        sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
    }

    FILE *fp = popen(cmd, "r");
    if (fp == NULL) {
        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
        exit(-1);
    }

    int fileNum = 0;
    while (fscanf(fp, "%128s", fileArray[fileNum++])) {
        if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
            fileNum--;
        }
        if (fileNum >= totalFiles) {
            break;
        }
    }

    if (fileNum != totalFiles) {
        errorPrint("directory:%s changed while read\n", directoryName);
        pclose(fp);
        exit(-1);
    }

    pclose(fp);
}

static void taosCheckDatabasesSQLFile(const char *directoryName)
{
    char cmd[1024] = { 0 };
    sprintf(cmd, "ls %s/dbs.sql", directoryName);

    FILE *fp = popen(cmd, "r");
    if (fp == NULL) {
        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
        exit(-1);
    }

    while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
        break;
    }

    pclose(fp);
}

static void taosMallocDumpFiles()
{
    g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
    for (int i = 0; i < g_tsSqlFileNum; i++) {
        g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
    }
}

static void freeDumpFiles()
{
    for (int i = 0; i < g_tsSqlFileNum; i++) {
        tfree(g_tsDumpInSqlFiles[i]);
    }
    tfree(g_tsDumpInSqlFiles);
}

static void taosGetDirectoryFileList(char *inputDir)
{
    struct stat fileStat;
    if (stat(inputDir, &fileStat) < 0) {
        errorPrint("%s not exist\n", inputDir);
        exit(-1);
    }

    if (fileStat.st_mode & S_IFDIR) {
        taosCheckDatabasesSQLFile(inputDir);
        if (g_args.avro)
            g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
        else
            g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
        int tsSqlFileNumOfTbls = g_tsSqlFileNum;
        if (g_tsDbSqlFile[0] != 0) {
            tsSqlFileNumOfTbls--;
        }
        taosMallocDumpFiles();
        if (0 != tsSqlFileNumOfTbls) {
            if (g_args.avro) {
                taosParseDirectory(inputDir, "sql", "avro", g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
            } else {
                taosParseDirectory(inputDir, "sql", NULL, g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
            }
        }
        fprintf(stdout, "\nstart to dispose %d files in %s\n", g_tsSqlFileNum, inputDir);
    } else {
        errorPrint("%s is not a directory\n", inputDir);
        exit(-1);
    }
}

static FILE* taosOpenDumpInFile(char *fptr) {
    wordexp_t full_path;

    if (wordexp(fptr, &full_path, 0) != 0) {
        errorPrint("illegal file name: %s\n", fptr);
        return NULL;
    }

    char *fname = full_path.we_wordv[0];

    FILE *f = NULL;
    if ((fname) && (strlen(fname) > 0)) {
        f = fopen(fname, "r");
        if (f == NULL) {
            errorPrint("%s() LN%d, failed to open file %s\n", __func__, __LINE__, fname);
        }
    }

    wordfree(&full_path);
    return f;
}

static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, char* encode, char* fileName) {
    int     read_len = 0;
    char   *cmd      = NULL;
    size_t  cmd_len  = 0;
    char   *line     = NULL;
    size_t  line_len = 0;

    cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
    if (cmd == NULL) {
        errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
        return -1;
    }

    int lastRowsPrint = 5000000;
    int lineNo = 0;
    while ((read_len = getline(&line, &line_len, fp)) != -1) {
        ++lineNo;
        if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
        line[--read_len] = '\0';

        //if (read_len == 0 || isCommentLine(line)) {  // line starts with #
        if (read_len == 0 ) {
            continue;
        }

        if (line[read_len - 1] == '\\') {
            line[read_len - 1] = ' ';
            memcpy(cmd + cmd_len, line, read_len);
            cmd_len += read_len;
            continue;
        }

        memcpy(cmd + cmd_len, line, read_len);
        cmd[read_len + cmd_len]= '\0';
        if (queryDbImpl(taos, cmd)) {
            errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", __func__, __LINE__, lineNo, fileName);
            fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
        }

        memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
        cmd_len = 0;

        if (lineNo >= lastRowsPrint) {
            printf(" %d lines already be executed from file %s\n", lineNo, fileName);
            lastRowsPrint += 5000000;
        }
    }

    tfree(cmd);
    tfree(line);
    fclose(fp);
    return 0;
}

static void* dumpInWorkThreadFp(void *arg)
{
    threadInfo *pThread = (threadInfo*)arg;
    setThreadName("dumpInWorkThrd");

    for (int32_t f = 0; f < g_tsSqlFileNum; ++f) {
        if (f % pThread->totalThreads == pThread->threadIndex) {
            char *SQLFileName = g_tsDumpInSqlFiles[f];
            FILE *fp = taosOpenDumpInFile(SQLFileName);
            if (NULL == fp) {
                continue;
            }
            fprintf(stderr, ", Success Open input file: %s\n", SQLFileName);
            dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName);
        }
    }

    return NULL;
}

static void startDumpInWorkThreads()
{
    pthread_attr_t thattr;
    threadInfo *pThread;
    int32_t totalThreads = g_args.thread_num;

    if (totalThreads > g_tsSqlFileNum) {
        totalThreads = g_tsSqlFileNum;
    }

    threadInfo *threadObj = (threadInfo *)calloc(totalThreads, sizeof(threadInfo));
    if (NULL == threadObj) {
        errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
    }
    for (int32_t t = 0; t < totalThreads; ++t) {
        pThread = threadObj + t;
        pThread->threadIndex = t;
        pThread->totalThreads = totalThreads;
        pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port);
        if (pThread->taos == NULL) {
            errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
            free(threadObj);
            return;
        }
        pthread_attr_init(&thattr);
        pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);

        if (pthread_create(&(pThread->threadID), &thattr, dumpInWorkThreadFp, (void*)pThread) != 0) {
            errorPrint("%s() LN%d, thread:%d failed to start\n", __func__, __LINE__, pThread->threadIndex);
            exit(0);
        }
    }

    for (int t = 0; t < totalThreads; ++t) {
        pthread_join(threadObj[t].threadID, NULL);
    }

    for (int t = 0; t < totalThreads; ++t) {
        taos_close(threadObj[t].taos);
    }

    free(threadObj);
}

static int dumpIn() {
    assert(g_args.isDumpIn);

    TAOS *taos = NULL;
    FILE *fp   = NULL;

    taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port);
    if (taos == NULL) {
        errorPrint("%s() LN%d, failed to connect to TDengine server\n", __func__, __LINE__);
        return -1;
    }

    taosGetDirectoryFileList(g_args.inpath);

    int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
    if (g_tsDbSqlFile[0] != 0) {
        tsSqlFileNumOfTbls--;

        fp = taosOpenDumpInFile(g_tsDbSqlFile);
        if (NULL == fp) {
            errorPrint("%s() LN%d, failed to open input file %s\n", __func__, __LINE__, g_tsDbSqlFile);
            return -1;
        }
        fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
        loadFileCharset(fp, g_tsCharset);

        dumpInOneFile(taos, fp, g_tsCharset, g_args.encode, g_tsDbSqlFile);
    }

    taos_close(taos);

    if (0 != tsSqlFileNumOfTbls) {
        startDumpInWorkThreads();
    }

    freeDumpFiles();
    return 0;
}

/* ======== new version (right-hand column of the side-by-side diff) ======== */
/* (continuation of the dumpNtbOfDb() worker thread, whose opening lines are above this excerpt) */
    TAOS_ROW row = NULL;
    int64_t i = 0;
    int64_t count;
    while((row = taos_fetch_row(res)) != NULL) {
        debugPrint("[%d] sub table %"PRId64": name: %s\n",
                pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);

        count = dumpNormalTable(
                pThreadInfo->taos,
                pThreadInfo->dbName,
                pThreadInfo->stbName,
                (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
                pThreadInfo->precision,
                fp);
        if (count < 0) {
            break;
        } else {
            atomic_add_fetch_64(&g_totalDumpOutRows, count);
        }
    }

    fclose(fp);
    return NULL;
}

static int64_t dumpNtbOfDbByThreads(SDbInfo *dbInfo, int64_t ntbCount)
{
    if (ntbCount <= 0) {
        return 0;
    }

    int threads = g_args.thread_num;

    int64_t a = ntbCount / threads;
    if (a < 1) {
        threads = ntbCount;
        a = 1;
    }

    assert(threads);
    int64_t b = ntbCount % threads;

    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
    assert(pids);
    assert(infos);

    for (int64_t i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        pThreadInfo->taos = taos_connect(g_args.host, g_args.user, g_args.password,
                dbInfo->name, g_args.port);
        if (NULL == pThreadInfo->taos) {
            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
                    __func__, __LINE__, taos_errstr(NULL));
            free(pids);
            free(infos);
            return -1;
        }

        pThreadInfo->threadIndex = i;
        pThreadInfo->count = (i<b)?a+1:a;
        pThreadInfo->from = (i==0)?0:
            ((threadInfo *)(infos + i - 1))->from +
            ((threadInfo *)(infos + i - 1))->count;
        strcpy(pThreadInfo->dbName, dbInfo->name);
        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);

        pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
    }

    for (int64_t i = 0; i < threads; i++) {
        pthread_join(pids[i], NULL);
    }

    for (int64_t i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        taos_close(pThreadInfo->taos);
    }

    free(pids);
    free(infos);

    return 0;
}

static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
{
    TAOS *taos = taos_connect(g_args.host,
            g_args.user, g_args.password, dbInfo->name, g_args.port);
    if (NULL == taos) {
        errorPrint("Failed to connect to TDengine server %s by specified database %s\n",
                g_args.host, dbInfo->name);
        return 0;
    }

    char command[COMMAND_SIZE];
    TAOS_RES *result;
    int32_t code;

    sprintf(command, "USE %s", dbInfo->name);
    result = taos_query(taos, command);
    code = taos_errno(result);
    if (code != 0) {
        errorPrint("invalid database %s, reason: %s\n", dbInfo->name, taos_errstr(result));
        taos_close(taos);
        return 0;
    }

    sprintf(command, "SHOW TABLES");
    result = taos_query(taos, command);
    code = taos_errno(result);
    if (code != 0) {
        errorPrint("Failed to show %s\'s tables, reason: %s\n", dbInfo->name, taos_errstr(result));
        taos_close(taos);
        return 0;
    }

    g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
    assert(g_tablesList);

    TAOS_ROW row;
    int64_t count = 0;
    while(NULL != (row = taos_fetch_row(result))) {
        debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
                __func__, __LINE__, count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
        tstrncpy(((TableInfo *)(g_tablesList + count))->name,
                (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
        char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
        if (stbName) {
            tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
                    (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
            ((TableInfo *)(g_tablesList + count))->belongStb = true;
        }
        count ++;
    }
    taos_close(taos);

    int64_t records = dumpNtbOfDbByThreads(dbInfo, count);

    free(g_tablesList);
    g_tablesList = NULL;

    return records;
}

static int64_t dumpNtbOfStbByThreads(SDbInfo *dbInfo, char *stbName)
{
    int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);

    if (ntbCount <= 0) {
        return 0;
    }

    int threads = g_args.thread_num;

    int64_t a = ntbCount / threads;
    if (a < 1) {
        threads = ntbCount;
        a = 1;
    }

    assert(threads);
    int64_t b = ntbCount % threads;

    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
    assert(pids);
    assert(infos);

    for (int64_t i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        pThreadInfo->taos = taos_connect(g_args.host, g_args.user, g_args.password,
                dbInfo->name, g_args.port);
        if (NULL == pThreadInfo->taos) {
            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
                    __func__, __LINE__, taos_errstr(NULL));
            free(pids);
            free(infos);
            return -1;
        }

        pThreadInfo->threadIndex = i;
        pThreadInfo->count = (i<b)?a+1:a;
        pThreadInfo->from = (i==0)?0:
            ((threadInfo *)(infos + i - 1))->from +
            ((threadInfo *)(infos + i - 1))->count;
        strcpy(pThreadInfo->dbName, dbInfo->name);
        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);

        strcpy(pThreadInfo->stbName, stbName);
        pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
    }

    for (int64_t i = 0; i < threads; i++) {
        pthread_join(pids[i], NULL);
    }

    int64_t records = 0;
    for (int64_t i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        records += pThreadInfo->rowsOfDumpOut;
        taos_close(pThreadInfo->taos);
    }

    free(pids);
    free(infos);

    return records;
}

static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
{
    dumpCreateDbClause(dbInfo, g_args.with_property, fp);

    fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
    g_resultStatistics.totalDatabasesOfDumpOut++;

    dumpCreateSTableClauseOfDb(dbInfo, fp);

    return dumpNTablesOfDb(dbInfo);
}

static int dumpOut() {
    TAOS     *taos   = NULL;
    TAOS_RES *result = NULL;

    TAOS_ROW row;
    FILE *fp = NULL;
    int32_t count = 0;

    char tmpBuf[MAX_PATH_LEN] = {0};
    if (g_args.outpath[0] != 0) {
        sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
    } else {
        sprintf(tmpBuf, "dbs.sql");
    }

    fp = fopen(tmpBuf, "w");
    if (fp == NULL) {
        errorPrint("%s() LN%d, failed to open file %s\n", __func__, __LINE__, tmpBuf);
        return -1;
    }

    g_args.dumpDbCount = getDumpDbCount();
    debugPrint("%s() LN%d, dump db count: %d\n", __func__, __LINE__, g_args.dumpDbCount);

    if (0 == g_args.dumpDbCount) {
        errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
        fclose(fp);
        return -1;
    }

    g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
    if (g_dbInfos == NULL) {
        errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__);
        goto _exit_failure;
    }

    char command[COMMAND_SIZE];

    /* Connect to server */
    taos = taos_connect(g_args.host, g_args.user, g_args.password, NULL, g_args.port);
    if (taos == NULL) {
        errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
        goto _exit_failure;
    }

    /* --------------------------------- Main Code -------------------------------- */
    /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
    /*  */
    dumpCharset(fp);

    sprintf(command, "show databases");
    result = taos_query(taos, command);
    int32_t code = taos_errno(result);

    if (code != 0) {
        errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
                __func__, __LINE__, command, taos_errstr(result));
        goto _exit_failure;
    }

    TAOS_FIELD *fields = taos_fetch_fields(result);

    while ((row = taos_fetch_row(result)) != NULL) {
        // sys database name : 'log', but subsequent version changed to 'log'
        if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
                        fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
                && (!g_args.allow_sys)) {
            continue;
        }

        if (g_args.databases) {  // input multi dbs
            if (inDatabasesSeq((char *)row[TSDB_SHOW_DB_NAME_INDEX],
                        fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
                continue;
            }
        } else if (!g_args.all_databases) {  // only input one db
            if (strncasecmp(g_args.arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX],
                        fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
                continue;
        }

        g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
        if (g_dbInfos[count] == NULL) {
            errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
                    __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
            goto _exit_failure;
        }

        okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
        tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
                min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
        if (g_args.with_property) {
            g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
            g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
            g_dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
            g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
            g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);

            tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
                    min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
            //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
            //g_dbInfos[count]->daysToKeep1;
            //g_dbInfos[count]->daysToKeep2;
            g_dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
            g_dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
            g_dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
            g_dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
            g_dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
            g_dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
            g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
            g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));

            tstrncpy(g_dbInfos[count]->precision,
                    (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], DB_PRECISION_LEN);
            g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
        }
        count++;

        if (g_args.databases) {
            if (count > g_args.dumpDbCount)
                break;
        } else if (!g_args.all_databases) {
            if (count >= 1)
                break;
        }
    }

    if (count == 0) {
        errorPrint("%d databases valid to dump\n", count);
        goto _exit_failure;
    }

    if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ...  OR  taosdump --all-databases
        for (int i = 0; i < count; i++) {
            int64_t records = 0;
            records = dumpWholeDatabase(g_dbInfos[i], fp);
            if (records >= 0) {
                okPrint("Database %s dumped\n", g_dbInfos[i]->name);
                g_totalDumpOutRows += records;
            }
        }
    } else {
        if (1 == g_args.arg_list_len) {
            int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
            if (records >= 0) {
                okPrint("Database %s dumped\n", g_dbInfos[0]->name);
                g_totalDumpOutRows += records;
            }
        } else {
            dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
        }

        int superTblCnt = 0 ;
        for (int i = 1; g_args.arg_list[i]; i++) {
            TableRecordInfo tableRecordInfo;

            if (getTableRecordInfo(g_dbInfos[0]->name, g_args.arg_list[i],
                        &tableRecordInfo) < 0) {
                errorPrint("input the invalid table %s\n", g_args.arg_list[i]);
                continue;
            }

            int64_t records = 0;
            if (tableRecordInfo.isStb) {  // dump all table of this stable
                int ret = dumpStableClasuse(taos, g_dbInfos[0],
                        tableRecordInfo.tableRecord.stable, fp);
                if (ret >= 0) {
                    superTblCnt++;
                    records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
                }
            } else if (tableRecordInfo.belongStb){
                dumpStableClasuse(taos, g_dbInfos[0],
                        tableRecordInfo.tableRecord.stable, fp);
                records = dumpNormalTableBelongStb(taos, g_dbInfos[0],
                        tableRecordInfo.tableRecord.stable, g_args.arg_list[i]);
            } else {
                records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
            }

            if (records >= 0) {
                okPrint("table: %s dumped\n", g_args.arg_list[i]);
                g_totalDumpOutRows += records;
            }
        }
    }

    taos_close(taos);

    /* Close the handle and return */
    fclose(fp);
    taos_free_result(result);
    freeDbInfos();
    fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
    return 0;

_exit_failure:
    fclose(fp);
    taos_close(taos);
    taos_free_result(result);
    freeDbInfos();
    errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
    return -1;
}
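A quick note on the work split used by dumpNtbOfDbByThreads() and dumpNtbOfStbByThreads() above: each worker thread is handed a contiguous [from, from + count) slice of the table list, and the first ntbCount % threads workers take one extra table so the remainder is spread evenly and every table is dumped exactly once. The standalone sketch below is not part of the commit; the table count and thread count are made-up values chosen only to illustrate the arithmetic.

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the [from, from + count) split used above.
 * Assumed example values: 10 child tables spread over 4 worker threads. */
int main(void) {
    int64_t ntbCount = 10;   /* hypothetical number of child tables */
    int     threads  = 4;    /* hypothetical g_args.thread_num      */

    int64_t a = ntbCount / threads;   /* base share per thread          */
    if (a < 1) {                      /* fewer tables than threads      */
        threads = (int)ntbCount;
        a = 1;
    }
    int64_t b = ntbCount % threads;   /* first b threads get one extra  */

    int64_t from = 0;
    for (int i = 0; i < threads; i++) {
        int64_t count = (i < b) ? a + 1 : a;
        printf("thread %d dumps tables [%lld, %lld)\n",
               i, (long long)from, (long long)(from + count));
        from += count;
    }
    /* Prints [0,3) [3,6) [6,8) [8,10): every table is covered exactly once. */
    return 0;
}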
int main(int argc, char *argv[]) {
@@ -2988,7 +3859,10 @@ int main(int argc, char *argv[]) {
printf("databasesSeq: %s\n", g_args.databasesSeq);
printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
printf("with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
printf("avro format: %s\n", g_args.avro?"true":"false");
+printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time);
@@ -3042,7 +3916,10 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
+fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
@@ -3072,6 +3949,7 @@ int main(int argc, char *argv[]) {
tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (dumpIn() < 0) {
+errorPrint("%s\n", "dumpIn() failed!");
ret = -1;
}
} else {
@@ -3103,4 +3981,3 @@ int main(int argc, char *argv[]) {
return ret;
}
CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
-if(NOT WIN32)
-string(ASCII 27 Esc)
-set(ColourReset "${Esc}[m")
-set(ColourBold "${Esc}[1m")
-set(Red "${Esc}[31m")
-set(Green "${Esc}[32m")
-set(Yellow "${Esc}[33m")
-set(Blue "${Esc}[34m")
-set(Magenta "${Esc}[35m")
-set(Cyan "${Esc}[36m")
-set(White "${Esc}[37m")
-set(BoldRed "${Esc}[1;31m")
-set(BoldGreen "${Esc}[1;32m")
-set(BoldYellow "${Esc}[1;33m")
-set(BoldBlue "${Esc}[1;34m")
-set(BoldMagenta "${Esc}[1;35m")
-set(BoldCyan "${Esc}[1;36m")
-set(BoldWhite "${Esc}[1;37m")
-endif()
ADD_SUBDIRECTORY(monitor)
IF (TD_BUILD_HTTP)
@@ -57,6 +37,8 @@ ELSE ()
DEPENDS taos
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config"
+PATCH_COMMAND
+COMMAND git clean -f -d
BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}"
INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/
)
...
@@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) {
TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext;
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext,
-                                       sizeof(TSDB_CACHE_PTR_TYPE), 3000);
+                                       sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive);
pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
...
@@ -20,7 +20,8 @@
extern "C" {
#endif
-#define TSDB_CFG_MAX_NUM 128
+#define TSDB_CFG_MAX_NUM 129
#define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41
...
@@ -31,9 +31,8 @@ class TDTestCase:
tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
-tdLog.info('=============== step2,create table增加了转义字符')
+tdLog.info('=============== step2,create table with escape character')
tdLog.info('create table tb1 using stb1 tags("abc\\"def")')
-#增加了转义字符\
tdSql.execute('create table tb1 using stb1 tags("abc\\"def")')
tdLog.info('=============== step3,insert data')
...