提交 6cafd224 编写于 作者: D dapan1121

Merge branch 'develop' into feature/TD-10748

...@@ -19,3 +19,6 @@ ...@@ -19,3 +19,6 @@
[submodule "src/plugins/blm3"] [submodule "src/plugins/blm3"]
path = src/plugins/blm3 path = src/plugins/blm3
url = https://github.com/taosdata/blm3 url = https://github.com/taosdata/blm3
[submodule "deps/avro"]
path = deps/avro
url = https://github.com/apache/avro
...@@ -15,6 +15,26 @@ ELSE () ...@@ -15,6 +15,26 @@ ELSE ()
CMAKE_MINIMUM_REQUIRED(VERSION 3.0) CMAKE_MINIMUM_REQUIRED(VERSION 3.0)
ENDIF () ENDIF ()
if(NOT WIN32)
string(ASCII 27 Esc)
set(ColourReset "${Esc}[m")
set(ColourBold "${Esc}[1m")
set(Red "${Esc}[31m")
set(Green "${Esc}[32m")
set(Yellow "${Esc}[33m")
set(Blue "${Esc}[34m")
set(Magenta "${Esc}[35m")
set(Cyan "${Esc}[36m")
set(White "${Esc}[37m")
set(BoldRed "${Esc}[1;31m")
set(BoldGreen "${Esc}[1;32m")
set(BoldYellow "${Esc}[1;33m")
set(BoldBlue "${Esc}[1;34m")
set(BoldMagenta "${Esc}[1;35m")
set(BoldCyan "${Esc}[1;36m")
set(BoldWhite "${Esc}[1;37m")
endif()
SET(TD_ACCOUNT FALSE) SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE) SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE) SET(TD_GRANT FALSE)
......
...@@ -107,7 +107,77 @@ def pre_test(){ ...@@ -107,7 +107,77 @@ def pre_test(){
make > /dev/null make > /dev/null
make install > /dev/null make install > /dev/null
cd ${WKC}/tests cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/ || echo "not install" pip3 install ${WKC}/src/connector/python/
'''
return 1
}
def pre_test_noinstall(){
sh'hostname'
sh'''
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
git checkout 2.0
'''
}
else{
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WK}
git checkout 2.0
'''
}
else{
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
git clean -dfx
mkdir debug
cd debug
cmake .. > /dev/null
make
''' '''
return 1 return 1
} }
...@@ -460,31 +530,55 @@ pipeline { ...@@ -460,31 +530,55 @@ pipeline {
stage('arm64centos7') { stage('arm64centos7') {
agent{label " arm64centos7 "} agent{label " arm64centos7 "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64centos8') { stage('arm64centos8') {
agent{label " arm64centos8 "} agent{label " arm64centos8 "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm32bionic') { stage('arm32bionic') {
agent{label " arm32bionic "} agent{label " arm32bionic "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64bionic') { stage('arm64bionic') {
agent{label " arm64bionic "} agent{label " arm64bionic "}
steps { steps {
pre_test() pre_test_noinstall()
} }
} }
stage('arm64focal') { stage('arm64focal') {
agent{label " arm64focal "} agent{label " arm64focal "}
steps { steps {
pre_test() pre_test_noinstall()
}
}
stage('centos7') {
agent{label " centos7 "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:trusty') {
agent{label " trusty "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:xenial') {
agent{label " xenial "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:bionic') {
agent{label " bionic "}
steps {
pre_test_noinstall()
} }
} }
......
...@@ -128,7 +128,6 @@ IF (TD_APLHINE) ...@@ -128,7 +128,6 @@ IF (TD_APLHINE)
MESSAGE(STATUS "aplhine is defined") MESSAGE(STATUS "aplhine is defined")
ENDIF () ENDIF ()
MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP})
IF ("${BUILD_HTTP}" STREQUAL "") IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX) IF (TD_LINUX)
IF (TD_ARM_32) IF (TD_ARM_32)
...@@ -140,16 +139,27 @@ IF ("${BUILD_HTTP}" STREQUAL "") ...@@ -140,16 +139,27 @@ IF ("${BUILD_HTTP}" STREQUAL "")
SET(BUILD_HTTP "true") SET(BUILD_HTTP "true")
ENDIF () ENDIF ()
ENDIF () ENDIF ()
MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP})
IF (${BUILD_HTTP} MATCHES "true") IF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE) SET(TD_BUILD_HTTP TRUE)
ELSEIF (${BUILD_HTTP} MATCHES "false")
SET(TD_BUILD_HTTP FALSE)
ENDIF () ENDIF ()
IF (TD_BUILD_HTTP) IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED) ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF () ENDIF ()
IF ("${AVRO_SUPPORT}" MATCHES "true")
SET(TD_AVRO_SUPPORT TRUE)
ELSEIF ("${AVRO_SUPPORT}" MATCHES "false")
SET(TD_AVRO_SUPPORT FALSE)
ENDIF ()
IF (TD_AVRO_SUPPORT)
ADD_DEFINITIONS(-DAVRO_SUPPORT)
ENDIF ()
IF (TD_LINUX) IF (TD_LINUX)
ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_LINUX) ADD_DEFINITIONS(-D_LINUX)
......
...@@ -92,6 +92,8 @@ ENDIF () ...@@ -92,6 +92,8 @@ ENDIF ()
SET(TD_BUILD_HTTP FALSE) SET(TD_BUILD_HTTP FALSE)
SET(TD_AVRO_SUPPORT FALSE)
SET(TD_MEMORY_SANITIZER FALSE) SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true") IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE) SET(TD_MEMORY_SANITIZER TRUE)
......
...@@ -35,7 +35,7 @@ ELSEIF (TD_WINDOWS) ...@@ -35,7 +35,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .) #INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED) IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.36-dist.jar DESTINATION connector/jdbc)
ENDIF () ENDIF ()
ELSEIF (TD_DARWIN) ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
...@@ -4,7 +4,7 @@ PROJECT(TDengine) ...@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER) IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER}) SET(TD_VER_NUMBER ${VERNUMBER})
ELSE () ELSE ()
SET(TD_VER_NUMBER "2.3.0.0") SET(TD_VER_NUMBER "2.3.1.0")
ENDIF () ENDIF ()
IF (DEFINED VERCOMPATIBLE) IF (DEFINED VERCOMPATIBLE)
......
...@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT) ...@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C) ADD_SUBDIRECTORY(MQTT-C)
ENDIF () ENDIF ()
IF (TD_AVRO_SUPPORT)
MESSAGE("")
MESSAGE("${Green} ENABLE avro format support ${ColourReset}")
MESSAGE("")
include(ExternalProject)
ExternalProject_Add(
apache-avro
PREFIX "avro"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c
BUILD_IN_SOURCE 1
PATCH_COMMAND
COMMAND git clean -f -d
COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build
)
ELSE ()
MESSAGE("")
MESSAGE("${Yellow} NO avro format support ${ColourReset}")
MESSAGE("")
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED) IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("")
MESSAGE("${Green} ENABLE jemalloc ${ColourReset}")
MESSAGE("")
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc ExternalProject_Add(jemalloc
PREFIX "jemalloc" PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
...@@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED) ...@@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ENDIF () ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true") IF (${TSZ_ENABLED} MATCHES "true")
ADD_SUBDIRECTORY(TSZ) ADD_SUBDIRECTORY(TSZ)
ENDIF() ENDIF()
\ No newline at end of file
Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
## <a class="anchor" id="grafana"></a>Grafana ## <a class="anchor" id="grafana"></a>Grafana
TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。 TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。关于TDengine插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。
### 安装Grafana ### 安装Grafana
...@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ...@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
### 配置Grafana ### 配置Grafana
TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplugin 目录下。 TDengine 的 Grafana 插件请从 <https://github.com/taosdata/grafanaplugin/releases/latest> 下载。
以 CentOS 7.2 操作系统为例,将 grafanaplugin 目录拷贝到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
```bash ```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
``` ```
Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件: 以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
``` ```
Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
```ini
[plugins] [plugins]
enable_alpha = true allow_loading_unsigned_plugins = tdengine-datasource
allow_loading_unsigned_plugins = taosdata-tdengine-datasource
``` ```
### 使用 Grafana ### 使用 Grafana
...@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource ...@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
* ALIAS BY:可设置当前查询别名。 * ALIAS BY:可设置当前查询别名。
* GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 * GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
![img](../images/connections/create_dashboard2.jpg) ![img](../images/connections/create_dashboard2.jpg)
...@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource ...@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
#### 导入 Dashboard #### 导入 Dashboard
在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard 我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)
点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件 点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,j将id `15146` 填入并加载
![img](../images/connections/import_dashboard1.jpg) ![img](../images/connections/import_dashboard1.jpg)
导入完成之后可看到如下效果: 导入完成之后可看到如下效果:
![img](../images/connections/import_dashboard2.jpg) ![img](../images/connections/dashboard-15146.png)
## <a class="anchor" id="matlab"></a>MATLAB ## <a class="anchor" id="matlab"></a>MATLAB
......
...@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ...@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置 ## 数据链路设置
### 复制 TDengine 插件到 grafana 插件目录 ### 下载 TDengine 插件到 grafana 插件目录
```
1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine ```bash
2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
4. sudo systemctl restart grafana-server.service 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
``` ```
### 修改 /etc/telegraf/telegraf.conf ### 修改 /etc/telegraf/telegraf.conf
...@@ -61,7 +63,7 @@ sudo systemctl start telegraf ...@@ -61,7 +63,7 @@ sudo systemctl start telegraf
使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。 使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。
点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。 点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件再导入。之后可以看到如下界面的仪表盘: 点击左侧加号图标并选择 Import,从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png) ![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)
......
...@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ...@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置 ## 数据链路设置
### 复制 TDengine 插件到 grafana 插件目录 ### 复制 TDengine 插件到 grafana 插件目录
```
1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine ```bash
2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine 1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini 2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
4. sudo systemctl restart grafana-server.service 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
``` ```
### 配置 collectd ### 配置 collectd
...@@ -62,13 +64,13 @@ repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for S ...@@ -62,13 +64,13 @@ repeater 部分添加 { host:'<TDengine server/cluster host>', port: <port for S
#### 导入 collectd 仪表盘 #### 导入 collectd 仪表盘
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 下载 dashboard json 文件再导入。之后可以看到如下界面的仪表盘: 从 https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json 下载 dashboard json 文件,点击左侧加号图标并选择 Import,按照界面提示选择 JSON 文件导入。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png) ![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png)
#### 导入 StatsD 仪表盘 #### 导入 StatsD 仪表盘
点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 文件。如果安装 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 下载 dashboard json 文件再导入。之后可以看到如下界面的仪表盘: 从 https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json 下载 dashboard json 文件,点击左侧加号图标并选择 Import,按照界面提示导入JSON文件。之后可以看到如下界面的仪表盘:
![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png) ![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png)
## 总结 ## 总结
......
...@@ -12,12 +12,17 @@ https://grafana.com/grafana/download. ...@@ -12,12 +12,17 @@ https://grafana.com/grafana/download.
### Configure Grafana ### Configure Grafana
TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory. Download grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest> .
```bash
GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana. Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana.
```bash ```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine sudo unzip tdengine-datasource-$GF_VERSION.zip /var/lib/grafana/plugins/
``` ```
### Use Grafana ### Use Grafana
...@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp ...@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard #### Import Dashboard
A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`. We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file: Click the `Import` button on the left panel and load the grafana id:
![img](../images/connections/import_dashboard1.jpg) ![img](../images/connections/import_dashboard1.jpg)
You can see as follows after Dashboard imported. You can see as follows after Dashboard imported.
![img](../images/connections/import_dashboard2.jpg) ![img](../images/connections/dashboard-15146.png)
## <a class="anchor" id="matlab"></a> MATLAB ## <a class="anchor" id="matlab"></a> MATLAB
......
...@@ -203,6 +203,9 @@ keepColumnName 1 ...@@ -203,6 +203,9 @@ keepColumnName 1
# database name must be specified in restful interface if the following parameter is set, off by default # database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1 # httpDbNameMandatory 1
# http keep alive, default is 30 seconds
# httpKeepAlive 30000
# The following parameter is used to limit the maximum number of lines in log files. # The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters # max number of lines per log filters
# numOfLogLines 10000000 # numOfLogLines 10000000
......
...@@ -128,12 +128,12 @@ function check_link() { ...@@ -128,12 +128,12 @@ function check_link() {
function check_main_path() { function check_main_path() {
#check install main dir and all sub dir #check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d") main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
for i in ${main_dir[@]};do for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i check_file ${install_main_dir} $i
done done
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs") nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in ${nginx_main_dir[@]};do for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i check_file ${nginx_dir} $i
done done
fi fi
...@@ -143,11 +143,11 @@ function check_main_path() { ...@@ -143,11 +143,11 @@ function check_main_path() {
function check_bin_path() { function check_bin_path() {
# check install bin dir and all sub dir # check install bin dir and all sub dir
bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
for i in ${bin_dir[@]};do for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i check_file ${sbin_dir} $i
done done
lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
for i in ${lbin_dir[@]};do for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i check_link ${bin_link_dir}/$i
done done
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
...@@ -171,7 +171,7 @@ function check_lib_path() { ...@@ -171,7 +171,7 @@ function check_lib_path() {
function check_header_path() { function check_header_path() {
# check all header # check all header
header_dir=("taos.h" "taoserror.h") header_dir=("taos.h" "taoserror.h")
for i in ${header_dir[@]};do for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i check_link ${inc_link_dir}/$i
done done
echo -e "Check bin path:\033[32mOK\033[0m!" echo -e "Check bin path:\033[32mOK\033[0m!"
...@@ -179,7 +179,8 @@ function check_header_path() { ...@@ -179,7 +179,8 @@ function check_header_path() {
function check_blm3_config_dir() { function check_blm3_config_dir() {
# check all config # check all config
check_file ${cfg_install_dir} blm3.toml check_file ${cfg_install_dir} blm.toml
check_file ${cfg_install_dir} blm3.service
check_file ${install_main_dir}/cfg blm.toml.org check_file ${install_main_dir}/cfg blm.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!" echo -e "Check conf path:\033[32mOK\033[0m!"
} }
......
...@@ -11,4 +11,3 @@ Maintainer: support@taosdata.com ...@@ -11,4 +11,3 @@ Maintainer: support@taosdata.com
Provides: taosdata Provides: taosdata
Homepage: http://taosdata.com Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT. Description: Big Data Platform Designed and Optimized for IoT.
...@@ -32,5 +32,9 @@ if [ -f "${install_main_dir}/blm.toml" ]; then ...@@ -32,5 +32,9 @@ if [ -f "${install_main_dir}/blm.toml" ]; then
${csudo} rm -f ${install_main_dir}/cfg/blm.toml || : ${csudo} rm -f ${install_main_dir}/cfg/blm.toml || :
fi fi
if [ -f "${install_main_dir}/blm3.service" ]; then
${csudo} rm -f ${install_main_dir}/cfg/blm3.service || :
fi
# there can not libtaos.so*, otherwise ln -s error # there can not libtaos.so*, otherwise ln -s error
${csudo} rm -f ${install_main_dir}/driver/libtaos* || : ${csudo} rm -f ${install_main_dir}/driver/libtaos* || :
...@@ -47,6 +47,9 @@ cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_pat ...@@ -47,6 +47,9 @@ cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_pat
if [ -f "${compile_dir}/test/cfg/blm.toml" ]; then if [ -f "${compile_dir}/test/cfg/blm.toml" ]; then
cp ${compile_dir}/test/cfg/blm.toml ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/test/cfg/blm.toml ${pkg_dir}${install_home_path}/cfg
fi fi
if [ -f "${compile_dir}/test/cfg/blm3.service" ]; then
cp ${compile_dir}/test/cfg/blm3.service ${pkg_dir}${install_home_path}/cfg ||:
fi
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
...@@ -68,19 +71,24 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat ...@@ -68,19 +71,24 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
else
echo "grafanaplugin bundled directory not found!"
exit 1
fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
install_user_local_path="/usr/local"
if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
mkdir -p ${pkg_dir}${install_user_local_path}/lib
cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23
ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so
fi
if [ -f ${compile_dir}/build/lib/libavro.a ]; then
cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/
fi
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
...@@ -120,6 +128,10 @@ chmod 755 ${pkg_dir}/DEBIAN/* ...@@ -120,6 +128,10 @@ chmod 755 ${pkg_dir}/DEBIAN/*
debver="Version: "$tdengine_ver debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control
fi
#get taos version, then set deb name #get taos version, then set deb name
...@@ -151,4 +163,3 @@ cp ${pkg_dir}/*.deb ${output_dir} ...@@ -151,4 +163,3 @@ cp ${pkg_dir}/*.deb ${output_dir}
# clean tmep dir # clean tmep dir
rm -rf ${pkg_dir} rm -rf ${pkg_dir}
...@@ -45,24 +45,32 @@ echo "version=${version}" ...@@ -45,24 +45,32 @@ echo "version=${version}"
#docker manifest rm tdengine/tdengine:${version} #docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then if [ "$verType" == "beta" ]; then
docker manifest inspect tdengine/tdengine-beta:latest docker manifest inspect tdengine/tdengine-beta:latest
docker manifest inspect tdengine/tdengine-beta:${version}
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
sleep 30
docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine-beta:latest
docker manifest push tdengine/tdengine-beta:${version} docker manifest push tdengine/tdengine-beta:${version}
docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then elif [ "$verType" == "stable" ]; then
docker manifest inspect tdengine/tdengine:latest docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
sleep 30
docker manifest rm tdengine/tdengine:latest docker manifest rm tdengine/tdengine:latest
docker manifest rm tdengine/tdengine:${version}
docker manifest inspect tdengine/tdengine:latest
docker manifest inspect tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:latest
docker manifest push tdengine/tdengine:${version} docker manifest push tdengine/tdengine:${version}
docker manifest push tdengine/tdengine:latest
else else
echo "unknow verType, nor stabel or beta" echo "unknow verType, nor stabel or beta"
exit 1 exit 1
......
...@@ -151,7 +151,7 @@ function vercomp () { ...@@ -151,7 +151,7 @@ function vercomp () {
} }
# 1. check version information # 1. check version information
if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then
echo "please enter correct version" echo "please enter correct version"
exit 0 exit 0
fi fi
...@@ -213,7 +213,7 @@ else ...@@ -213,7 +213,7 @@ else
exit 1 exit 1
fi fi
make -j8 make -j8 && ${csudo} make install
cd ${curr_dir} cd ${curr_dir}
......
...@@ -32,20 +32,20 @@ if command -v sudo > /dev/null; then ...@@ -32,20 +32,20 @@ if command -v sudo > /dev/null; then
fi fi
function cp_rpm_package() { function cp_rpm_package() {
local cur_dir local cur_dir
cd $1 cd $1
cur_dir=$(pwd) cur_dir=$(pwd)
for dirlist in $(ls ${cur_dir}); do for dirlist in "$(ls ${cur_dir})"; do
if test -d ${dirlist}; then if test -d ${dirlist}; then
cd ${dirlist} cd ${dirlist}
cp_rpm_package ${cur_dir}/${dirlist} cp_rpm_package ${cur_dir}/${dirlist}
cd .. cd ..
fi fi
if test -e ${dirlist}; then if test -e ${dirlist}; then
cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
fi fi
done done
} }
if [ -d ${pkg_dir} ]; then if [ -d ${pkg_dir} ]; then
...@@ -56,6 +56,10 @@ cd ${pkg_dir} ...@@ -56,6 +56,10 @@ cd ${pkg_dir}
${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file}
fi
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, and modify package name, then clean temp dir # copy rpm package to output_dir, and modify package name, then clean temp dir
......
...@@ -57,6 +57,9 @@ cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg ...@@ -57,6 +57,9 @@ cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
if [ -f %{_compiledir}/test/cfg/blm.toml ]; then if [ -f %{_compiledir}/test/cfg/blm.toml ]; then
cp %{_compiledir}/test/cfg/blm.toml %{buildroot}%{homepath}/cfg cp %{_compiledir}/test/cfg/blm.toml %{buildroot}%{homepath}/cfg
fi fi
if [ -f %{_compiledir}/test/cfg/blm3.service ]; then
cp %{_compiledir}/test/cfg/blm3.service %{buildroot}%{homepath}/cfg
fi
cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script
...@@ -73,18 +76,20 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin ...@@ -73,18 +76,20 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
else
echo grafanaplugin bundled directory not found!
exit 1
fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then
cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver
ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23
ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so
fi
if [ -f %{_compiledir}/build/lib/libavro.a ]; then
cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver
fi
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin mkdir -p %{buildroot}%{userlocalpath}/bin
...@@ -151,16 +156,21 @@ if pidof taosd &> /dev/null; then ...@@ -151,16 +156,21 @@ if pidof taosd &> /dev/null; then
echo "Stop taosd service success!" echo "Stop taosd service success!"
sleep 1 sleep 1
fi fi
# if taos.cfg already softlink, remove it # if taos.cfg already exist, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then if [ -f %{cfg_install_dir}/taos.cfg ]; then
${csudo} rm -f %{homepath}/cfg/taos.cfg || : ${csudo} rm -f %{homepath}/cfg/taos.cfg || :
fi fi
# if blm.toml already softlink, remove it # if blm.toml already exist, remove it
if [ -f %{cfg_install_dir}/blm.toml ]; then if [ -f %{cfg_install_dir}/blm.toml ]; then
${csudo} rm -f %{homepath}/cfg/blm.toml || : ${csudo} rm -f %{homepath}/cfg/blm.toml || :
fi fi
# if blm3.service already softlink, remove it
if [ -f %{cfg_install_dir}/blm3.service ]; then
${csudo} rm -f %{homepath}/cfg/blm3.service || :
fi
# there can not libtaos.so*, otherwise ln -s error # there can not libtaos.so*, otherwise ln -s error
${csudo} rm -f %{homepath}/driver/libtaos* || : ${csudo} rm -f %{homepath}/driver/libtaos* || :
......
# /bin/bash #!/bin/bash
# #
CSI=$(echo -e "\033[") CSI=$(echo -e "\033[")
CRED="${CSI}1;31m" CRED="${CSI}1;31m"
......
...@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() { ...@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -358,7 +358,7 @@ function is_correct_ipaddr() { ...@@ -358,7 +358,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
...@@ -679,8 +679,8 @@ function install_service_on_systemd() { ...@@ -679,8 +679,8 @@ function install_service_on_systemd() {
taosd_service_config="${service_config_dir}/taosd.service" taosd_service_config="${service_config_dir}/taosd.service"
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
...@@ -756,6 +756,11 @@ function install_service_on_systemd() { ...@@ -756,6 +756,11 @@ function install_service_on_systemd() {
fi fi
} }
function install_blm3_service() {
[ -f ${script_dir}/cfg/blm3.service ] &&\
${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/
}
function install_service() { function install_service() {
if ((${service_mod}==0)); then if ((${service_mod}==0)); then
install_service_on_systemd install_service_on_systemd
...@@ -878,6 +883,7 @@ function update_TDengine() { ...@@ -878,6 +883,7 @@ function update_TDengine() {
if [ -z $1 ]; then if [ -z $1 ]; then
install_bin install_bin
install_service install_service
install_blm3_service
install_config install_config
install_blm3_config install_blm3_config
...@@ -959,6 +965,7 @@ function install_TDengine() { ...@@ -959,6 +965,7 @@ function install_TDengine() {
# For installing new # For installing new
install_bin install_bin
install_service install_service
install_blm3_service
openresty_work=false openresty_work=false
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { ...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -342,7 +342,7 @@ function is_correct_ipaddr() { ...@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() { ...@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -305,7 +305,7 @@ function set_hostname() { ...@@ -305,7 +305,7 @@ function set_hostname() {
echo "set hostname fail!" echo "set hostname fail!"
return return
fi fi
#ubuntu/centos /etc/hostname #ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then if [[ -e /etc/hostname ]]; then
${csudo} echo $newHostname > /etc/hostname ||: ${csudo} echo $newHostname > /etc/hostname ||:
...@@ -330,7 +330,7 @@ function is_correct_ipaddr() { ...@@ -330,7 +330,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { ...@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -342,7 +342,7 @@ function is_correct_ipaddr() { ...@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
......
...@@ -212,7 +212,8 @@ function install_jemalloc() { ...@@ -212,7 +212,8 @@ function install_jemalloc() {
fi fi
if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc /usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
/usr/local/include/jemalloc
fi fi
if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then
/usr/bin/install -c -d /usr/local/lib /usr/bin/install -c -d /usr/local/lib
...@@ -225,23 +226,47 @@ function install_jemalloc() { ...@@ -225,23 +226,47 @@ function install_jemalloc() {
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig /usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
/usr/local/lib/pkgconfig
fi
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi fi
fi fi
if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc /usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
/usr/local/share/doc/jemalloc
fi fi
if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then
/usr/bin/install -c -d /usr/local/share/man/man3 /usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3 /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
/usr/local/share/man/man3
fi fi
if [ -d /etc/ld.so.conf.d ]; then fi
echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf }
${csudo} ldconfig
else function install_avro() {
echo "/etc/ld.so.conf.d not found!" if [ "$osType" != "Darwin" ]; then
if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
/usr/bin/install -c -d /usr/local/$1
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
ln -sf libavro.so.23 /usr/local/$1/libavro.so
/usr/bin/install -c -d /usr/local/$1
[ -f ${binary_dir}/build/$1/libavro.a ] &&
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
${csudo} ldconfig
else
echo "/etc/ld.so.conf.d not found!"
fi
fi fi
fi fi
} }
...@@ -292,6 +317,8 @@ function install_lib() { ...@@ -292,6 +317,8 @@ function install_lib() {
fi fi
install_jemalloc install_jemalloc
install_avro lib
install_avro lib64
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig ${csudo} ldconfig
...@@ -381,11 +408,6 @@ function install_data() { ...@@ -381,11 +408,6 @@ function install_data() {
} }
function install_connector() { function install_connector() {
if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else else
...@@ -481,8 +503,8 @@ function install_service_on_systemd() { ...@@ -481,8 +503,8 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
...@@ -503,6 +525,11 @@ function install_service_on_systemd() { ...@@ -503,6 +525,11 @@ function install_service_on_systemd() {
${csudo} systemctl enable taosd ${csudo} systemctl enable taosd
} }
function install_blm3_service() {
[ -f ${script_dir}/cfg/blm3.service ] &&\
${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/
}
function install_service() { function install_service() {
if ((${service_mod}==0)); then if ((${service_mod}==0)); then
install_service_on_systemd install_service_on_systemd
...@@ -544,6 +571,7 @@ function update_TDengine() { ...@@ -544,6 +571,7 @@ function update_TDengine() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
install_service install_service
install_blm3_service
fi fi
install_config install_config
...@@ -598,6 +626,7 @@ function install_TDengine() { ...@@ -598,6 +626,7 @@ function install_TDengine() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
install_service install_service
install_blm3_service
fi fi
install_config install_config
......
...@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then ...@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector" ...@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector ...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo ...@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then #if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: # cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
# else
# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
# fi
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then # if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector # cp -r ${connector_dir}/go ${install_dir}/connector
# else # else
......
...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector ...@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
else else
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# #
# This file is used to install tdengine rpm package on centos systems. The operating system # This file is used to install tdengine rpm package on centos systems. The operating system
# is required to use systemd to manage services at boot # is required to use systemd to manage services at boot
#set -x # set -x
iplist="" iplist=""
serverFqdn="" serverFqdn=""
...@@ -86,6 +86,24 @@ function install_include() { ...@@ -86,6 +86,24 @@ function install_include() {
${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h ${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
} }
function install_avro_lib() {
${csudo} rm -f ${lib_link_dir}/libavro* || :
${csudo} rm -f ${lib64_link_dir}/libavro* || :
if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then
${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0
${csudo} ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23
${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libavro.so ]]; then
${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || :
${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || :
${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || :
fi
fi
${csudo} ldconfig
}
function install_lib() { function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos* || : ${csudo} rm -f ${lib_link_dir}/libtaos* || :
${csudo} rm -f ${lib64_link_dir}/libtaos* || : ${csudo} rm -f ${lib64_link_dir}/libtaos* || :
...@@ -97,6 +115,8 @@ function install_lib() { ...@@ -97,6 +115,8 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi fi
${csudo} ldconfig
} }
function install_bin() { function install_bin() {
...@@ -127,7 +147,7 @@ function add_newHostname_to_hosts() { ...@@ -127,7 +147,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost) arr=($iphost)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$localIp" ]]; then if [[ "$s" == "$localIp" ]]; then
return return
...@@ -182,7 +202,7 @@ function is_correct_ipaddr() { ...@@ -182,7 +202,7 @@ function is_correct_ipaddr() {
IFS=" " IFS=" "
arr=($iplist) arr=($iplist)
IFS="$OLD_IFS" IFS="$OLD_IFS"
for s in ${arr[@]} for s in "${arr[@]}"
do do
if [[ "$s" == "$newIp" ]]; then if [[ "$s" == "$newIp" ]]; then
return 0 return 0
...@@ -424,8 +444,8 @@ function install_service_on_systemd() { ...@@ -424,8 +444,8 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'After=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Wants=network-online.target blm3.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
...@@ -446,6 +466,11 @@ function install_service_on_systemd() { ...@@ -446,6 +466,11 @@ function install_service_on_systemd() {
${csudo} systemctl enable taosd ${csudo} systemctl enable taosd
} }
function install_blm3_service() {
[ -f ${script_dir}/cfg/blm3.service ] &&\
${csudo} cp ${script_dir}/cfg/blm3.service ${service_config_dir}/
}
function install_service() { function install_service() {
if ((${service_mod}==0)); then if ((${service_mod}==0)); then
install_service_on_systemd install_service_on_systemd
...@@ -474,8 +499,10 @@ function install_TDengine() { ...@@ -474,8 +499,10 @@ function install_TDengine() {
# Install include, lib, binary and service # Install include, lib, binary and service
install_include install_include
install_lib install_lib
install_avro_lib
install_bin install_bin
install_service install_service
install_blm3_service
install_config install_config
install_blm3_config install_blm3_config
......
...@@ -58,6 +58,12 @@ function kill_taosd() { ...@@ -58,6 +58,12 @@ function kill_taosd() {
} }
function clean_service_on_systemd() { function clean_service_on_systemd() {
blm3_service_config="${service_config_dir}/blm3.service"
if systemctl is-active --quiet blm3; then
echo "blm3 is running, stopping it..."
${csudo} systemctl stop blm3 &> /dev/null || echo &> /dev/null
fi
taosd_service_config="${service_config_dir}/${taos_service_name}.service" taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then if systemctl is-active --quiet ${taos_service_name}; then
...@@ -67,6 +73,9 @@ function clean_service_on_systemd() { ...@@ -67,6 +73,9 @@ function clean_service_on_systemd() {
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config} ${csudo} rm -f ${taosd_service_config}
[ -f ${blm3_service_config} ] && ${csudo} rm -f ${blm3_service_config}
} }
function clean_service_on_sysvinit() { function clean_service_on_sysvinit() {
......
...@@ -111,12 +111,14 @@ function clean_log() { ...@@ -111,12 +111,14 @@ function clean_log() {
function clean_service_on_systemd() { function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service" taosd_service_config="${service_config_dir}/${taos_service_name}.service"
blm3_service_config="${service_config_dir}/blm3.service"
if systemctl is-active --quiet ${taos_service_name}; then if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..." echo "TDengine taosd is running, stopping it..."
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi fi
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config} ${csudo} rm -f ${taosd_service_config}
[ -f ${blm3_service_config} ] && ${sudo} rm -f ${blm3_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then if systemctl is-active --quiet ${tarbitrator_service_name}; then
......
...@@ -48,4 +48,3 @@ if [ ${coreFlag} = "unlimited" ];then ...@@ -48,4 +48,3 @@ if [ ${coreFlag} = "unlimited" ];then
fi fi
fi fi
/usr/bin/blm3 &
name: tdengine name: tdengine
base: core20 base: core20
version: '2.3.0.0' version: '2.3.1.0'
icon: snap/gui/t-dengine.svg icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT. summary: an open-source big data platform designed and optimized for IoT.
description: | description: |
......
...@@ -66,8 +66,7 @@ typedef struct { ...@@ -66,8 +66,7 @@ typedef struct {
int32_t affectedRows; int32_t affectedRows;
} SSmlLinesInfo; } SSmlLinesInfo;
char* addEscapeCharToString(char *str, int32_t len);
void addEscapeCharToString(char *str, int32_t len);
int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info); int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info);
bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info); bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info);
bool isValidInteger(char *str); bool isValidInteger(char *str);
......
...@@ -631,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* ...@@ -631,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
if (code != 0) { if (code != 0) {
tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName); tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
return code; return code;
} else {
pointSchema->precision = dbSchema.precision;
destroySmlSTableSchema(&dbSchema);
} }
} else if (code == TSDB_CODE_SUCCESS) { }
if (code == TSDB_CODE_SUCCESS) {
pointSchema->precision = dbSchema.precision;
size_t pointTagSize = taosArrayGetSize(pointSchema->tags); size_t pointTagSize = taosArrayGetSize(pointSchema->tags);
size_t pointFieldSize = taosArrayGetSize(pointSchema->fields); size_t pointFieldSize = taosArrayGetSize(pointSchema->fields);
...@@ -1177,13 +1177,14 @@ static void escapeSpecialCharacter(uint8_t field, const char **pos) { ...@@ -1177,13 +1177,14 @@ static void escapeSpecialCharacter(uint8_t field, const char **pos) {
*pos = cur; *pos = cur;
} }
void addEscapeCharToString(char *str, int32_t len) { char* addEscapeCharToString(char *str, int32_t len) {
if (str == NULL) { if (str == NULL) {
return; return NULL;
} }
memmove(str + 1, str, len); memmove(str + 1, str, len);
str[0] = str[len + 1] = TS_ESCAPE_CHAR; str[0] = str[len + 1] = TS_ESCAPE_CHAR;
str[len + 2] = '\0'; str[len + 2] = '\0';
return str;
} }
bool isValidInteger(char *str) { bool isValidInteger(char *str) {
...@@ -1907,8 +1908,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash ...@@ -1907,8 +1908,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
} }
//Escape special character //Escape special character
if (*cur == '\\') { if (*cur == '\\') {
//TODO: escape will work after column & tag
//support spcial characters
escapeSpecialCharacter(2, &cur); escapeSpecialCharacter(2, &cur);
} }
key[len] = *cur; key[len] = *cur;
...@@ -1985,6 +1984,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, ...@@ -1985,6 +1984,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
//Escape special character //Escape special character
if (*cur == '\\') { if (*cur == '\\') {
escapeSpecialCharacter(isTag ? 2 : 3, &cur); escapeSpecialCharacter(isTag ? 2 : 3, &cur);
len++;
} }
cur++; cur++;
len++; len++;
...@@ -2107,6 +2107,13 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -2107,6 +2107,13 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
pkv = *pKVs; pkv = *pKVs;
} }
size_t childTableNameLen = strlen(tsSmlChildTableName);
char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
if (childTableNameLen != 0) {
memcpy(childTableName, tsSmlChildTableName, childTableNameLen);
addEscapeCharToString(childTableName, (int32_t)(childTableNameLen));
}
while (*cur != '\0') { while (*cur != '\0') {
ret = parseSmlKey(pkv, &cur, pHash, info); ret = parseSmlKey(pkv, &cur, pHash, info);
if (ret) { if (ret) {
...@@ -2118,7 +2125,8 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -2118,7 +2125,8 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
tscError("SML:0x%"PRIx64" Unable to parse value", info->id); tscError("SML:0x%"PRIx64" Unable to parse value", info->id);
goto error; goto error;
} }
if (!isField && (strcasecmp(pkv->key, "`ID`") == 0)) {
if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) {
smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1); smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1);
memcpy(smlData->childTableName, pkv->value, pkv->length); memcpy(smlData->childTableName, pkv->value, pkv->length);
strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length); strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length);
......
...@@ -305,6 +305,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -305,6 +305,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
*pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV)); *pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV));
pkv = *pKVs; pkv = *pKVs;
size_t childTableNameLen = strlen(tsSmlChildTableName);
char childTbName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
if (childTableNameLen != 0) {
memcpy(childTbName, tsSmlChildTableName, childTableNameLen);
addEscapeCharToString(childTbName, (int32_t)(childTableNameLen));
}
while (*cur != '\0') { while (*cur != '\0') {
ret = parseTelnetTagKey(pkv, &cur, pHash, info); ret = parseTelnetTagKey(pkv, &cur, pHash, info);
if (ret) { if (ret) {
...@@ -316,7 +322,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -316,7 +322,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
tscError("OTD:0x%"PRIx64" Unable to parse value", info->id); tscError("OTD:0x%"PRIx64" Unable to parse value", info->id);
return ret; return ret;
} }
if ((strcasecmp(pkv->key, "`ID`") == 0)) { if (childTableNameLen != 0 && strcasecmp(pkv->key, childTbName) == 0) {
*childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1); *childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1);
memcpy(*childTableName, pkv->value, pkv->length); memcpy(*childTableName, pkv->value, pkv->length);
(*childTableName)[pkv->length] = '\0'; (*childTableName)[pkv->length] = '\0';
...@@ -892,26 +898,33 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, ...@@ -892,26 +898,33 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs,
if (tags == NULL || tags->type != cJSON_Object) { if (tags == NULL || tags->type != cJSON_Object) {
return TSDB_CODE_TSC_INVALID_JSON; return TSDB_CODE_TSC_INVALID_JSON;
} }
//only pick up the first ID value as child table name
cJSON *id = cJSON_GetObjectItem(tags, "ID"); //handle child table name
if (id != NULL) { size_t childTableNameLen = strlen(tsSmlChildTableName);
if (!cJSON_IsString(id)) { char childTbName[TSDB_TABLE_NAME_LEN] = {0};
tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id); if (childTableNameLen != 0) {
return TSDB_CODE_TSC_INVALID_JSON; memcpy(childTbName, tsSmlChildTableName, childTableNameLen);
} cJSON *id = cJSON_GetObjectItem(tags, childTbName);
size_t idLen = strlen(id->valuestring);
*childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
memcpy(*childTableName, id->valuestring, idLen);
strntolower_s(*childTableName, *childTableName, (int32_t)idLen);
addEscapeCharToString(*childTableName, (int32_t)idLen);
//check duplicate IDs
cJSON_DeleteItemFromObject(tags, "ID");
id = cJSON_GetObjectItem(tags, "ID");
if (id != NULL) { if (id != NULL) {
return TSDB_CODE_TSC_DUP_TAG_NAMES; if (!cJSON_IsString(id)) {
tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id);
return TSDB_CODE_TSC_INVALID_JSON;
}
size_t idLen = strlen(id->valuestring);
*childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
memcpy(*childTableName, id->valuestring, idLen);
strntolower_s(*childTableName, *childTableName, (int32_t)idLen);
addEscapeCharToString(*childTableName, (int32_t)idLen);
//check duplicate IDs
cJSON_DeleteItemFromObject(tags, childTbName);
id = cJSON_GetObjectItem(tags, childTbName);
if (id != NULL) {
return TSDB_CODE_TSC_DUP_TAG_NAMES;
}
} }
} }
int32_t tagNum = cJSON_GetArraySize(tags); int32_t tagNum = cJSON_GetArraySize(tags);
//at least one tag pair required //at least one tag pair required
if (tagNum <= 0) { if (tagNum <= 0) {
......
...@@ -2495,6 +2495,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -2495,6 +2495,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg12 = "parameter is out of range [1, 100]"; const char* msg12 = "parameter is out of range [1, 100]";
const char* msg13 = "parameter list required"; const char* msg13 = "parameter list required";
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
const char* msg15 = "parameter is out of range [1, 1000]";
switch (functionId) { switch (functionId) {
case TSDB_FUNC_COUNT: { case TSDB_FUNC_COUNT: {
...@@ -2942,11 +2943,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col ...@@ -2942,11 +2943,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} }
} }
} else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
int64_t numRowsSelected = GET_INT32_VAL(val); int64_t numRowsSelected = GET_INT64_VAL(val);
if (numRowsSelected <= 0 || numRowsSelected > 1000) { if (numRowsSelected <= 0 || numRowsSelected > 1000) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
} }
// todo REFACTOR // todo REFACTOR
......
...@@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime; ...@@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime;
// common // common
extern int tsRpcTimer; extern int tsRpcTimer;
extern int tsRpcMaxTime; extern int tsRpcMaxTime;
extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
extern int32_t tsMaxConnections; extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns; extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer; extern int32_t tsShellActivityTimer;
...@@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores; ...@@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores;
extern int8_t tsDaylight; extern int8_t tsDaylight;
extern char tsTimezone[]; extern char tsTimezone[];
extern char tsLocale[]; extern char tsLocale[];
extern char tsCharset[]; // default encode string extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile; extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize; extern int32_t tsCompressMsgSize;
extern int32_t tsCompressColData; extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults; extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[]; extern char tsTempDir[];
//query buffer management // query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing extern int64_t
extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked
extern int8_t tsKeepOriginalColumnName; extern int8_t tsKeepOriginalColumnName;
// client // client
extern int32_t tsMaxSQLStringLen; extern int32_t tsMaxSQLStringLen;
...@@ -108,7 +109,7 @@ extern int32_t tsQuorum; ...@@ -108,7 +109,7 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate; extern int8_t tsUpdate;
extern int8_t tsCacheLastRow; extern int8_t tsCacheLastRow;
//tsdb // tsdb
extern bool tsdbForceKeepFile; extern bool tsdbForceKeepFile;
extern bool tsdbForceCompactFile; extern bool tsdbForceCompactFile;
extern int32_t tsdbWalFlushSize; extern int32_t tsdbWalFlushSize;
...@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress; ...@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql; extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum; extern int8_t tsTelegrafUseFieldNum;
extern int8_t tsHttpDbNameMandatory; extern int8_t tsHttpDbNameMandatory;
extern int32_t tsHttpKeepAlive;
// mqtt // mqtt
extern int8_t tsEnableMqttModule; extern int8_t tsEnableMqttModule;
...@@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3]; ...@@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen; extern int32_t tsTopicBianryLen;
// system info // system info
extern char tsOsName[]; extern char tsOsName[];
extern int64_t tsPageSize; extern int64_t tsPageSize;
extern int64_t tsOpenMax; extern int64_t tsOpenMax;
extern int64_t tsStreamMax; extern int64_t tsStreamMax;
extern int32_t tsNumOfCores; extern int32_t tsNumOfCores;
extern float tsTotalLogDirGB; extern float tsTotalLogDirGB;
extern float tsTotalTmpDirGB; extern float tsTotalTmpDirGB;
extern float tsTotalDataDirGB; extern float tsTotalDataDirGB;
extern float tsAvailLogDirGB; extern float tsAvailLogDirGB;
extern float tsAvailTmpDirectorySpace; extern float tsAvailTmpDirectorySpace;
extern float tsAvailDataDirGB; extern float tsAvailDataDirGB;
extern float tsUsedDataDirGB; extern float tsUsedDataDirGB;
extern float tsMinimalLogDirGB; extern float tsMinimalLogDirGB;
extern float tsReservedTmpDirectorySpace; extern float tsReservedTmpDirectorySpace;
extern float tsMinimalDataDirGB; extern float tsMinimalDataDirGB;
extern int32_t tsTotalMemoryMB; extern int32_t tsTotalMemoryMB;
extern uint32_t tsVersion; extern uint32_t tsVersion;
// build info // build info
...@@ -196,43 +198,44 @@ extern char gitinfoOfInternal[]; ...@@ -196,43 +198,44 @@ extern char gitinfoOfInternal[];
extern char buildinfo[]; extern char buildinfo[];
// log // log
extern int8_t tsAsyncLog; extern int8_t tsAsyncLog;
extern int32_t tsNumOfLogLines; extern int32_t tsNumOfLogLines;
extern int32_t tsLogKeepDays; extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag; extern int32_t dDebugFlag;
extern int32_t vDebugFlag; extern int32_t vDebugFlag;
extern int32_t mDebugFlag; extern int32_t mDebugFlag;
extern uint32_t cDebugFlag; extern uint32_t cDebugFlag;
extern int32_t jniDebugFlag; extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag; extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag; extern int32_t sdbDebugFlag;
extern int32_t httpDebugFlag; extern int32_t httpDebugFlag;
extern int32_t mqttDebugFlag; extern int32_t mqttDebugFlag;
extern int32_t monDebugFlag; extern int32_t monDebugFlag;
extern int32_t uDebugFlag; extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag; extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag; extern int32_t odbcDebugFlag;
extern uint32_t qDebugFlag; extern uint32_t qDebugFlag;
extern int32_t wDebugFlag; extern int32_t wDebugFlag;
extern int32_t cqDebugFlag; extern int32_t cqDebugFlag;
extern int32_t debugFlag; extern int32_t debugFlag;
extern int8_t tsClientMerge; extern int8_t tsClientMerge;
#ifdef TD_TSZ #ifdef TD_TSZ
// lossy // lossy
extern char lossyColumns[]; extern char lossyColumns[];
extern double fPrecision; extern double fPrecision;
extern double dPrecision; extern double dPrecision;
extern uint32_t maxRange; extern uint32_t maxRange;
extern uint32_t curRange; extern uint32_t curRange;
extern char Compressor[]; extern char Compressor[];
#endif #endif
// long query // long query
extern int8_t tsDeadLockKillQuery; extern int8_t tsDeadLockKillQuery;
// schemaless // schemaless
extern char tsDefaultJSONStrType[]; extern char tsDefaultJSONStrType[];
extern char tsSmlChildTableName[];
typedef struct { typedef struct {
......
...@@ -14,18 +14,18 @@ ...@@ -14,18 +14,18 @@
*/ */
#define _DEFAULT_SOURCE #define _DEFAULT_SOURCE
#include "tglobal.h"
#include "monitor.h"
#include "os.h" #include "os.h"
#include "taosdef.h" #include "taosdef.h"
#include "taoserror.h" #include "taoserror.h"
#include "tulog.h" #include "tcompare.h"
#include "tconfig.h" #include "tconfig.h"
#include "tglobal.h"
#include "monitor.h"
#include "tsocket.h"
#include "tutil.h"
#include "tlocale.h" #include "tlocale.h"
#include "tsocket.h"
#include "ttimezone.h" #include "ttimezone.h"
#include "tcompare.h" #include "tulog.h"
#include "tutil.h"
// cluster // cluster
char tsFirst[TSDB_EP_LEN] = {0}; char tsFirst[TSDB_EP_LEN] = {0};
...@@ -49,16 +49,16 @@ int32_t tsDnodeId = 0; ...@@ -49,16 +49,16 @@ int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0; int64_t tsDnodeStartTime = 0;
// common // common
int32_t tsRpcTimer = 300; int32_t tsRpcTimer = 300;
int32_t tsRpcMaxTime = 600; // seconds; int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default
int32_t tsMaxShellConns = 50000; int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000; int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0f; float tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 4; int32_t tsNumOfCommitThreads = 4;
float tsRatioOfQueryCores = 1.0f; float tsRatioOfQueryCores = 1.0f;
int8_t tsDaylight = 0; int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
...@@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; ...@@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN; int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0; int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from // the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp // one virtual node, to order according to timestamp
...@@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000; ...@@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000;
int32_t tsMinSlidingTime = 10; int32_t tsMinSlidingTime = 10;
// the maxinum number of distict query result // the maxinum number of distict query result
int32_t tsMaxNumOfDistinctResults = 1000 * 10000; int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 us for interval time range, changed accordingly // 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1; int32_t tsMinIntervalTime = 1;
...@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000; ...@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000;
int32_t tsStreamCompStartDelay = 10000; int32_t tsStreamCompStartDelay = 10000;
// the stream computing delay time after executing failed, change accordingly // the stream computing delay time after executing failed, change accordingly
int32_t tsRetryStreamCompDelay = 10*1000; int32_t tsRetryStreamCompDelay = 10 * 1000;
// The delayed computing ration. 10% of the whole computing time window by default. // The delayed computing ration. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f; float tsStreamComputDelayRatio = 0.1f;
...@@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1; ...@@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1;
int32_t tsRetrieveBlockingModel = 0; int32_t tsRetrieveBlockingModel = 0;
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name // last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
int8_t tsKeepOriginalColumnName = 0; int8_t tsKeepOriginalColumnName = 0;
// db parameters // db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS; int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP; int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK; int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK; int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION; int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION; int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION;
int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
int32_t tsMaxVgroupsPerDb = 0; int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO; int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
// tsdb config // tsdb config
// For backward compatibility // For backward compatibility
bool tsdbForceKeepFile = false; bool tsdbForceKeepFile = false;
bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly
int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB
// balance // balance
int8_t tsEnableBalance = 1; int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0; int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4; int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1; int8_t tsEnableFlowCtrl = 1;
...@@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1; ...@@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0; int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0; int8_t tsTelegrafUseFieldNum = 0;
int8_t tsHttpDbNameMandatory = 0; int8_t tsHttpDbNameMandatory = 0;
int32_t tsHttpKeepAlive = 30000;
// mqtt // mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor // monitor
int8_t tsEnableMonitorModule = 1; int8_t tsEnableMonitorModule = 1;
...@@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey"; ...@@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds int32_t tsMonitorInterval = 30; // seconds
// stream // stream
int8_t tsEnableStream = 1; int8_t tsEnableStream = 1;
// internal // internal
int8_t tsCompactMnodeWal = 0; int8_t tsCompactMnodeWal = 0;
...@@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0}; ...@@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0};
char tsScriptDir[PATH_MAX] = {0}; char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/"; char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0; int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000; int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE #ifndef _STORAGE
...@@ -231,42 +232,42 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS]; ...@@ -231,42 +232,42 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS];
int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L}; int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L};
// system info // system info
char tsOsName[10] = "Linux"; char tsOsName[10] = "Linux";
int64_t tsPageSize; int64_t tsPageSize;
int64_t tsOpenMax; int64_t tsOpenMax;
int64_t tsStreamMax; int64_t tsStreamMax;
int32_t tsNumOfCores = 1; int32_t tsNumOfCores = 1;
float tsTotalTmpDirGB = 0; float tsTotalTmpDirGB = 0;
float tsTotalDataDirGB = 0; float tsTotalDataDirGB = 0;
float tsAvailTmpDirectorySpace = 0; float tsAvailTmpDirectorySpace = 0;
float tsAvailDataDirGB = 0; float tsAvailDataDirGB = 0;
float tsUsedDataDirGB = 0; float tsUsedDataDirGB = 0;
float tsReservedTmpDirectorySpace = 1.0f; float tsReservedTmpDirectorySpace = 1.0f;
float tsMinimalDataDirGB = 2.0f; float tsMinimalDataDirGB = 2.0f;
int32_t tsTotalMemoryMB = 0; int32_t tsTotalMemoryMB = 0;
uint32_t tsVersion = 0; uint32_t tsVersion = 0;
// log // log
int32_t tsNumOfLogLines = 10000000; int32_t tsNumOfLogLines = 10000000;
int32_t mDebugFlag = 131; int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131; int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135; int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135; int32_t vDebugFlag = 135;
uint32_t cDebugFlag = 131; uint32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131; int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131; int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131; int32_t httpDebugFlag = 131;
int32_t mqttDebugFlag = 131; int32_t mqttDebugFlag = 131;
int32_t monDebugFlag = 131; int32_t monDebugFlag = 131;
uint32_t qDebugFlag = 131; uint32_t qDebugFlag = 131;
int32_t rpcDebugFlag = 131; int32_t rpcDebugFlag = 131;
int32_t uDebugFlag = 131; int32_t uDebugFlag = 131;
int32_t debugFlag = 0; int32_t debugFlag = 0;
int32_t sDebugFlag = 135; int32_t sDebugFlag = 135;
int32_t wDebugFlag = 135; int32_t wDebugFlag = 135;
int32_t tsdbDebugFlag = 131; int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 131; int32_t cqDebugFlag = 131;
int32_t fsDebugFlag = 135; int32_t fsDebugFlag = 135;
int8_t tsClientMerge = 0; int8_t tsClientMerge = 0;
...@@ -274,13 +275,14 @@ int8_t tsClientMerge = 0; ...@@ -274,13 +275,14 @@ int8_t tsClientMerge = 0;
// //
// lossy compress 6 // lossy compress 6
// //
char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress. char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
// below option can take effect when tsLossyColumns not empty // can close lossy compress.
double fPrecision = 1E-8; // float column precision // below option can take effect when tsLossyColumns not empty
double dPrecision = 1E-16; // double column precision double fPrecision = 1E-8; // float column precision
uint32_t maxRange = 500; // max range double dPrecision = 1E-16; // double column precision
uint32_t curRange = 100; // range uint32_t maxRange = 500; // max range
char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR uint32_t curRange = 100; // range
char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif #endif
// long query death-lock // long query death-lock
...@@ -288,6 +290,7 @@ int8_t tsDeadLockKillQuery = 0; ...@@ -288,6 +290,7 @@ int8_t tsDeadLockKillQuery = 0;
// default JSON string type // default JSON string type
char tsDefaultJSONStrType[7] = "binary"; char tsDefaultJSONStrType[7] = "binary";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. If set to empty system will generate table name using MD5 hash.
int32_t (*monStartSystemFp)() = NULL; int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL; void (*monStopSystemFp)() = NULL;
...@@ -298,7 +301,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"}; ...@@ -298,7 +301,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"};
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT; static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
void taosSetAllDebugFlag() { void taosSetAllDebugFlag() {
if (debugFlag != 0) { if (debugFlag != 0) {
mDebugFlag = debugFlag; mDebugFlag = debugFlag;
sdbDebugFlag = debugFlag; sdbDebugFlag = debugFlag;
dDebugFlag = debugFlag; dDebugFlag = debugFlag;
...@@ -309,7 +312,7 @@ void taosSetAllDebugFlag() { ...@@ -309,7 +312,7 @@ void taosSetAllDebugFlag() {
httpDebugFlag = debugFlag; httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag; mqttDebugFlag = debugFlag;
monDebugFlag = debugFlag; monDebugFlag = debugFlag;
qDebugFlag = debugFlag; qDebugFlag = debugFlag;
rpcDebugFlag = debugFlag; rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag; uDebugFlag = debugFlag;
sDebugFlag = debugFlag; sDebugFlag = debugFlag;
...@@ -321,12 +324,13 @@ void taosSetAllDebugFlag() { ...@@ -321,12 +324,13 @@ void taosSetAllDebugFlag() {
} }
bool taosCfgDynamicOptions(char *msg) { bool taosCfgDynamicOptions(char *msg) {
char *option, *value; char *option, *value;
int32_t olen, vlen; int32_t olen, vlen;
int32_t vint = 0; int32_t vint = 0;
paGetToken(msg, &option, &olen); paGetToken(msg, &option, &olen);
if (olen == 0) return false;; if (olen == 0) return false;
;
paGetToken(option + olen + 1, &value, &vlen); paGetToken(option + olen + 1, &value, &vlen);
if (vlen == 0) if (vlen == 0)
...@@ -339,9 +343,9 @@ bool taosCfgDynamicOptions(char *msg) { ...@@ -339,9 +343,9 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i; SGlobalCfg *cfg = tsGlobalConfig + i;
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;
int32_t cfgLen = (int32_t)strlen(cfg->option); int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue; if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue;
...@@ -370,7 +374,7 @@ bool taosCfgDynamicOptions(char *msg) { ...@@ -370,7 +374,7 @@ bool taosCfgDynamicOptions(char *msg) {
return true; return true;
} }
if (strncasecmp(cfg->option, "debugFlag", olen) == 0) { if (strncasecmp(cfg->option, "debugFlag", olen) == 0) {
taosSetAllDebugFlag(); taosSetAllDebugFlag();
} }
return true; return true;
} }
...@@ -427,7 +431,7 @@ static void taosCheckDataDirCfg() { ...@@ -427,7 +431,7 @@ static void taosCheckDataDirCfg() {
} }
static int32_t taosCheckTmpDir(void) { static int32_t taosCheckTmpDir(void) {
if (strlen(tsTempDir) <= 0){ if (strlen(tsTempDir) <= 0) {
uError("tempDir is not set"); uError("tempDir is not set");
return -1; return -1;
} }
...@@ -448,7 +452,7 @@ static void doInitGlobalConfig(void) { ...@@ -448,7 +452,7 @@ static void doInitGlobalConfig(void) {
srand(taosSafeRand()); srand(taosSafeRand());
SGlobalCfg cfg = {0}; SGlobalCfg cfg = {0};
// ip address // ip address
cfg.option = "firstEp"; cfg.option = "firstEp";
cfg.ptr = tsFirst; cfg.ptr = tsFirst;
...@@ -577,12 +581,12 @@ static void doInitGlobalConfig(void) { ...@@ -577,12 +581,12 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsMaxNumOfDistinctResults; cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10*10000; cfg.minValue = 10 * 10000;
cfg.maxValue = 10000*10000; cfg.maxValue = 10000 * 10000;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "numOfMnodes"; cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes; cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
...@@ -1189,7 +1193,7 @@ static void doInitGlobalConfig(void) { ...@@ -1189,7 +1193,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// module configs // module configs
cfg.option = "flowctrl"; cfg.option = "flowctrl";
cfg.ptr = &tsEnableFlowCtrl; cfg.ptr = &tsEnableFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT8;
...@@ -1320,6 +1324,17 @@ static void doInitGlobalConfig(void) { ...@@ -1320,6 +1324,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// pContext in cache
cfg.option = "httpKeepAlive";
cfg.ptr = &tsHttpKeepAlive;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 3000;
cfg.maxValue = 3600000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// debug flag // debug flag
cfg.option = "numOfLogLines"; cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines; cfg.ptr = &tsNumOfLogLines;
...@@ -1401,7 +1416,6 @@ static void doInitGlobalConfig(void) { ...@@ -1401,7 +1416,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "sdbDebugFlag"; cfg.option = "sdbDebugFlag";
cfg.ptr = &sdbDebugFlag; cfg.ptr = &sdbDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
...@@ -1633,7 +1647,7 @@ static void doInitGlobalConfig(void) { ...@@ -1633,7 +1647,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// enable kill long query // enable kill long query
cfg.option = "deadLockKillQuery"; cfg.option = "deadLockKillQuery";
cfg.ptr = &tsDeadLockKillQuery; cfg.ptr = &tsDeadLockKillQuery;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT8;
...@@ -1665,6 +1679,17 @@ static void doInitGlobalConfig(void) { ...@@ -1665,6 +1679,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
// child talbe name specified in schemaless tag value
cfg.option = "smlChildTableName";
cfg.ptr = tsSmlChildTableName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = tListLen(tsSmlChildTableName);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks // flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
cfg.option = "walFlushSize"; cfg.option = "walFlushSize";
cfg.ptr = &tsdbWalFlushSize; cfg.ptr = &tsdbWalFlushSize;
...@@ -1731,21 +1756,18 @@ static void doInitGlobalConfig(void) { ...@@ -1731,21 +1756,18 @@ static void doInitGlobalConfig(void) {
#else #else
assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#endif #endif
} }
void taosInitGlobalCfg() { void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); }
pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
}
int32_t taosCheckGlobalCfg() { int32_t taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN]; char fqdn[TSDB_FQDN_LEN];
uint16_t port; uint16_t port;
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag(); taosSetAllDebugFlag();
} }
if (tsLocalFqdn[0] == 0) { if (tsLocalFqdn[0] == 0) {
taosGetFqdn(tsLocalFqdn); taosGetFqdn(tsLocalFqdn);
} }
...@@ -1772,7 +1794,7 @@ int32_t taosCheckGlobalCfg() { ...@@ -1772,7 +1794,7 @@ int32_t taosCheckGlobalCfg() {
if (taosCheckTmpDir()) { if (taosCheckTmpDir()) {
return -1; return -1;
} }
taosGetSystemInfo(); taosGetSystemInfo();
tsSetLocale(); tsSetLocale();
...@@ -1794,8 +1816,8 @@ int32_t taosCheckGlobalCfg() { ...@@ -1794,8 +1816,8 @@ int32_t taosCheckGlobalCfg() {
} }
if (tsMaxTablePerVnode < tsMinTablePerVnode) { if (tsMaxTablePerVnode < tsMinTablePerVnode) {
uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode,
tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode); tsMinTablePerVnode, tsMinTablePerVnode);
tsMaxTablePerVnode = tsMinTablePerVnode; tsMaxTablePerVnode = tsMinTablePerVnode;
} }
...@@ -1817,7 +1839,7 @@ int32_t taosCheckGlobalCfg() { ...@@ -1817,7 +1839,7 @@ int32_t taosCheckGlobalCfg() {
} }
tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035] tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035]
tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
tsSyncPort = tsServerPort + TSDB_PORT_SYNC; tsSyncPort = tsServerPort + TSDB_PORT_SYNC;
tsHttpPort = tsServerPort + TSDB_PORT_HTTP; tsHttpPort = tsServerPort + TSDB_PORT_HTTP;
...@@ -1837,17 +1859,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { ...@@ -1837,17 +1859,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
strcpy(fqdn, ep); strcpy(fqdn, ep);
char *temp = strchr(fqdn, ':'); char *temp = strchr(fqdn, ':');
if (temp) { if (temp) {
*temp = 0; *temp = 0;
*port = atoi(temp+1); *port = atoi(temp + 1);
} }
if (*port == 0) { if (*port == 0) {
*port = tsServerPort; *port = tsServerPort;
return -1; return -1;
} }
return 0; return 0;
} }
/* /*
......
...@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ...@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.36-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver") COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.35</version> <version>2.0.36</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>JDBCDriver</name> <name>JDBCDriver</name>
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.35</version> <version>2.0.36</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>JDBCDriver</name> <name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url> <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
...@@ -58,6 +58,13 @@ ...@@ -58,6 +58,13 @@
<version>4.13.1</version> <version>4.13.1</version>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.2</version>
<scope>test</scope>
</dependency>
</dependencies> </dependencies>
<build> <build>
...@@ -70,6 +77,18 @@ ...@@ -70,6 +77,18 @@
</resource> </resource>
</resources> </resources>
<plugins> <plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin> <plugin>
<groupId>org.apache.maven.plugins</groupId> <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId> <artifactId>maven-assembly-plugin</artifactId>
......
...@@ -135,7 +135,6 @@ public class TSDBDriver extends AbstractDriver { ...@@ -135,7 +135,6 @@ public class TSDBDriver extends AbstractDriver {
TSDBJNIConnector.init(props); TSDBJNIConnector.init(props);
return new TSDBConnection(props, this.dbMetaData); return new TSDBConnection(props, this.dbMetaData);
} catch (SQLWarning sqlWarning) { } catch (SQLWarning sqlWarning) {
sqlWarning.printStackTrace();
return new TSDBConnection(props, this.dbMetaData); return new TSDBConnection(props, this.dbMetaData);
} catch (SQLException sqlEx) { } catch (SQLException sqlEx) {
throw sqlEx; throw sqlEx;
......
...@@ -36,15 +36,15 @@ import java.util.regex.Pattern; ...@@ -36,15 +36,15 @@ import java.util.regex.Pattern;
* compatibility needs. * compatibility needs.
*/ */
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
// for jdbc preparedStatement interface
private String rawSql; private String rawSql;
private Object[] parameters; private Object[] parameters;
// for parameter binding
private ArrayList<ColumnInfo> colData; private long nativeStmtHandle = 0;
private String tableName;
private ArrayList<TableTagInfo> tableTags; private ArrayList<TableTagInfo> tableTags;
private int tagValueLength; private int tagValueLength;
private ArrayList<ColumnInfo> colData;
private String tableName;
private long nativeStmtHandle = 0;
TSDBPreparedStatement(TSDBConnection connection, String sql) { TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection); super(connection);
...@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
preprocessSql(); preprocessSql();
} }
/*
*
*/
/** /**
* Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
...@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override @Override
public void setObject(int parameterIndex, Object x) throws SQLException { public void setObject(int parameterIndex, Object x) throws SQLException {
if (isClosed()) { if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
} if (parameterIndex < 1 && parameterIndex >= parameters.length)
if (parameterIndex < 1 && parameterIndex >= parameters.length) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
}
parameters[parameterIndex - 1] = x; parameters[parameterIndex - 1] = x;
} }
...@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
// TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
...@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
//TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
...@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
if (isClosed()) if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
} }
@Override @Override
...@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
/////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////
// NOTE: the following APIs are not JDBC compatible // NOTE: the following APIs are not JDBC compatible
// set the bind table name // parameter binding
private static class ColumnInfo { private static class ColumnInfo {
@SuppressWarnings("rawtypes") @SuppressWarnings("rawtypes")
private ArrayList data; private ArrayList data;
...@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
} }
} }
public void setTableName(String name) { public void setTableName(String name) throws SQLException {
if (this.tableName != null) {
this.columnDataExecuteBatch();
this.columnDataClearBatchInternal();
}
this.tableName = name; this.tableName = name;
} }
...@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void columnDataExecuteBatch() throws SQLException { public void columnDataExecuteBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.executeBatch(this.nativeStmtHandle); connector.executeBatch(this.nativeStmtHandle);
this.columnDataClearBatch(); this.columnDataClearBatchInternal();
} }
@Deprecated
public void columnDataClearBatch() { public void columnDataClearBatch() {
columnDataClearBatchInternal();
}
private void columnDataClearBatchInternal() {
int size = this.colData.size(); int size = this.colData.size();
this.colData.clear(); this.colData.clear();
this.colData.addAll(Collections.nCopies(size, null)); this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name this.tableName = null; // clear the table name
} }
public void columnDataCloseBatch() throws SQLException { public void columnDataCloseBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.closeBatch(this.nativeStmtHandle); connector.closeBatch(this.nativeStmtHandle);
...@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ...@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
this.nativeStmtHandle = 0L; this.nativeStmtHandle = 0L;
this.tableName = null; this.tableName = null;
} }
@Override
public void close() throws SQLException {
this.columnDataClearBatchInternal();
this.columnDataCloseBatch();
super.close();
}
} }
...@@ -50,9 +50,13 @@ public class RestfulDriver extends AbstractDriver { ...@@ -50,9 +50,13 @@ public class RestfulDriver extends AbstractDriver {
String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName()); String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
} catch (UnsupportedEncodingException e) { } catch (UnsupportedEncodingException e) {
e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 concoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
} }
int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE));
boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE));
HttpClientPoolUtil.init(poolSize, keepAlive);
String result = HttpClientPoolUtil.execute(loginUrl); String result = HttpClientPoolUtil.execute(loginUrl);
JSONObject jsonResult = JSON.parseObject(result); JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status"); String status = jsonResult.getString("status");
......
...@@ -5,12 +5,11 @@ import com.taosdata.jdbc.TSDBErrorNumbers; ...@@ -5,12 +5,11 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement; import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator; import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity; import org.apache.http.HttpEntity;
import org.apache.http.NoHttpResponseException;
import org.apache.http.client.ClientProtocolException; import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig; import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*; import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ClientConnectionManager;
import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity; import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.CloseableHttpClient;
...@@ -21,21 +20,20 @@ import org.apache.http.protocol.HTTP; ...@@ -21,21 +20,20 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import javax.net.ssl.SSLException;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.concurrent.TimeUnit;
public class HttpClientPoolUtil { public class HttpClientPoolUtil {
private static final String DEFAULT_CONTENT_TYPE = "application/json"; private static final String DEFAULT_CONTENT_TYPE = "application/json";
private static final int DEFAULT_MAX_RETRY_COUNT = 5; private static final int DEFAULT_MAX_RETRY_COUNT = 5;
private static final int DEFAULT_MAX_TOTAL = 50; public static final String DEFAULT_HTTP_KEEP_ALIVE = "true";
private static final int DEFAULT_MAX_PER_ROUTE = 5; public static final String DEFAULT_MAX_PER_ROUTE = "20";
private static final int DEFAULT_HTTP_KEEP_TIME = -1; private static final int DEFAULT_HTTP_KEEP_TIME = -1;
private static String isKeepAlive;
private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
...@@ -55,36 +53,39 @@ public class HttpClientPoolUtil { ...@@ -55,36 +53,39 @@ public class HttpClientPoolUtil {
private static CloseableHttpClient httpClient; private static CloseableHttpClient httpClient;
static { public static void init(Integer connPoolSize, boolean keepAlive) {
if (httpClient == null) {
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); synchronized (HttpClientPoolUtil.class) {
connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); if (httpClient == null) {
connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); isKeepAlive = keepAlive ? HTTP.CONN_KEEP_ALIVE : HTTP.CONN_CLOSE;
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
httpClient = HttpClients.custom() connectionManager.setMaxTotal(connPoolSize * 10);
.setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) connectionManager.setDefaultMaxPerRoute(connPoolSize);
.setConnectionManager(connectionManager) httpClient = HttpClients.custom()
.setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT) .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
.build(); .setConnectionManager(connectionManager)
.setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT)
.build();
}
}
}
} }
/*** execute GET request ***/ /*** execute GET request ***/
public static String execute(String uri) throws SQLException { public static String execute(String uri) throws SQLException {
HttpEntity httpEntity = null; HttpEntity httpEntity = null;
String responseBody = ""; String responseBody = "";
try { HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); HttpContext context = HttpClientContext.create();
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context); try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) {
httpEntity = httpResponse.getEntity(); httpEntity = httpResponse.getEntity();
if (httpEntity != null) { if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
} }
} catch (ClientProtocolException e) { } catch (ClientProtocolException e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) { } catch (IOException exception) {
exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally { } finally {
if (httpEntity != null) { if (httpEntity != null) {
...@@ -94,30 +95,27 @@ public class HttpClientPoolUtil { ...@@ -94,30 +95,27 @@ public class HttpClientPoolUtil {
return responseBody; return responseBody;
} }
/*** execute POST request ***/ /*** execute POST request ***/
public static String execute(String uri, String data, String token) throws SQLException { public static String execute(String uri, String data, String token) throws SQLException {
HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
method.setHeader(HTTP.CONN_DIRECTIVE, isKeepAlive);
method.setHeader("Authorization", "Taosd " + token);
method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
HttpContext context = HttpClientContext.create();
HttpEntity httpEntity = null; HttpEntity httpEntity = null;
String responseBody = ""; String responseBody = "";
try { try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) {
HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
method.setHeader("Authorization", "Taosd " + token);
method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity(); httpEntity = httpResponse.getEntity();
if (httpEntity == null) { if (httpEntity == null) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data);
} }
responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
} catch (ClientProtocolException e) { } catch (ClientProtocolException e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) { } catch (IOException exception) {
exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally { } finally {
if (httpEntity != null) { if (httpEntity != null) {
...@@ -148,4 +146,12 @@ public class HttpClientPoolUtil { ...@@ -148,4 +146,12 @@ public class HttpClientPoolUtil {
return method; return method;
} }
/**
 * Release pooled HTTP connections that are no longer useful: connections
 * whose keep-alive has expired, and connections idle for more than 100 ms.
 * Synchronized on the class so concurrent resets do not interleave on the
 * shared connection manager.
 */
public static void reset() {
    synchronized (HttpClientPoolUtil.class) {
        ClientConnectionManager manager = httpClient.getConnectionManager();
        manager.closeExpiredConnections();
        manager.closeIdleConnections(100, TimeUnit.MILLISECONDS);
    }
}
} }
\ No newline at end of file
...@@ -16,7 +16,6 @@ public class TaosInfo implements TaosInfoMBean { ...@@ -16,7 +16,6 @@ public class TaosInfo implements TaosInfoMBean {
MBeanServer server = ManagementFactory.getPlatformMBeanServer(); MBeanServer server = ManagementFactory.getPlatformMBeanServer();
ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo"); ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo");
server.registerMBean(TaosInfo.getInstance(), name); server.registerMBean(TaosInfo.getInstance(), name);
} catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) {
e.printStackTrace(); e.printStackTrace();
} }
......
...@@ -49,14 +49,9 @@ public class Utils { ...@@ -49,14 +49,9 @@ public class Utils {
try { try {
return parseMicroSecTimestamp(timeStampStr); return parseMicroSecTimestamp(timeStampStr);
} catch (DateTimeParseException ee) { } catch (DateTimeParseException ee) {
try { return parseNanoSecTimestamp(timeStampStr);
return parseNanoSecTimestamp(timeStampStr);
} catch (DateTimeParseException eee) {
eee.printStackTrace();
}
} }
} }
return null;
} }
private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException { private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException {
......
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
public class ParameterBindTest {

    private static final String host = "127.0.0.1";
    private static final String stable = "weather";

    private Connection conn;
    private final Random random = new Random(System.currentTimeMillis());

    /**
     * Single-threaded parameter binding: insert {@code rows} rows into each
     * table through one prepared statement, then verify both the per-table
     * counts and the super-table total.
     */
    @Test
    public void test() {
        // given
        String[] tbnames = {"t1", "t2", "t3"};
        int rows = 10;

        // when
        // use the local `rows` instead of a hardcoded 10 so the insert and the
        // assertions below cannot drift apart
        insertIntoTables(tbnames, rows);

        // then
        assertRows(stable, tbnames.length * rows);
        for (String t : tbnames) {
            assertRows(t, rows);
        }
    }

    /**
     * Concurrent parameter binding: each thread binds and inserts into its own
     * group of tables; afterwards every table must hold exactly {@code rows} rows.
     */
    @Test
    public void testMultiThreads() {
        // given
        String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}};
        int rows = 10;

        // when
        List<Thread> threads = Arrays.stream(tables)
                .map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows)))
                .collect(Collectors.toList());
        threads.forEach(Thread::start);
        for (Thread thread : threads) {
            try {
                thread.join();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                Assert.fail("interrupted while waiting for insert threads: " + e.getMessage());
            }
        }

        // then
        for (String[] table : tables) {
            for (String t : table) {
                assertRows(t, rows);
            }
        }
    }

    // Assert that `select count(*)` on the given table returns exactly `rows`.
    // A SQLException now fails the test instead of being silently swallowed,
    // and both Statement and ResultSet are closed via try-with-resources.
    private void assertRows(String tbname, int rows) {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select count(*) from " + tbname)) {
            while (rs.next()) {
                int count = rs.getInt(1);
                Assert.assertEquals(rows, count);
            }
        } catch (SQLException e) {
            e.printStackTrace();
            Assert.fail("count query on " + tbname + " failed: " + e.getMessage());
        }
    }

    // Bind tags and column batches for every table through a single
    // TSDBPreparedStatement, then execute the whole batch at once.
    private void insertIntoTables(String[] tbnames, int rowsEachTable) {
        long current = System.currentTimeMillis();
        String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)";
        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 0; i < tbnames.length; i++) {
                pstmt.setTableName(tbnames[i]);
                pstmt.setTagInt(0, random.nextInt(100));
                pstmt.setTagInt(1, random.nextInt(100));

                // offset each table's timestamps so rows within a batch never collide
                ArrayList<Long> timestampList = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    timestampList.add(current + i * 1000 + j);
                }
                pstmt.setTimestamp(0, timestampList);

                ArrayList<Integer> f1List = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    f1List.add(random.nextInt(100));
                }
                pstmt.setInt(1, f1List);

                ArrayList<Integer> f2List = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    f2List.add(random.nextInt(100));
                }
                pstmt.setInt(2, f2List);

                pstmt.columnDataAddBatch();
            }
            pstmt.columnDataExecuteBatch();
        } catch (SQLException e) {
            e.printStackTrace();
            Assert.fail("batch insert failed: " + e.getMessage());
        }
    }

    /** Create a fresh test_pd database and the weather super table. */
    @Before
    public void before() {
        String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        try {
            conn = DriverManager.getConnection(url);
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("drop database if exists test_pd");
                stmt.execute("create database if not exists test_pd");
                stmt.execute("use test_pd");
                stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)");
            }
        } catch (SQLException e) {
            e.printStackTrace();
            Assert.fail("test setup failed: " + e.getMessage());
        }
    }

    /** Close the connection opened in {@link #before()}. */
    @After
    public void after() {
        try {
            if (conn != null)
                conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}
package com.taosdata.jdbc.rs;
import org.junit.Assert;
import org.junit.Test;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class HttpKeepAliveTest {

    private static final String host = "127.0.0.1";

    /**
     * Stress the RESTful endpoint from 4000 threads with HTTP keep-alive
     * disabled and a connection pool of 20. Each thread inserts into a table
     * that does not exist, so every thread must observe exactly one
     * SQLException; the total must equal the thread count.
     *
     * @throws SQLException if the initial connection cannot be established
     */
    @Test
    public void test() throws SQLException {
        //given
        int multi = 4000;
        AtomicInteger exceptionCount = new AtomicInteger();

        //when
        Properties props = new Properties();
        props.setProperty("httpKeepAlive", "false");
        props.setProperty("httpPoolSize", "20");
        // try-with-resources: the original leaked this connection
        try (Connection connection = DriverManager.getConnection(
                "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", props)) {
            List<Thread> threads = IntStream.range(0, multi).mapToObj(i -> new Thread(
                    () -> {
                        try (Statement stmt = connection.createStatement()) {
                            stmt.execute("insert into log.tb_not_exists values(now, 1)");
                            stmt.execute("select last(*) from log.dn");
                        } catch (SQLException throwables) {
                            // expected: the target table does not exist
                            exceptionCount.getAndIncrement();
                        }
                    }
            )).collect(Collectors.toList());
            threads.forEach(Thread::start);
            for (Thread thread : threads) {
                try {
                    thread.join();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                }
            }
        }

        //then
        Assert.assertEquals(multi, exceptionCount.get());
    }
}
...@@ -6,8 +6,7 @@ import java.sql.*; ...@@ -6,8 +6,7 @@ import java.sql.*;
public class WasNullTest { public class WasNullTest {
// private static final String host = "127.0.0.1"; private static final String host = "127.0.0.1";
private static final String host = "master";
private Connection conn; private Connection conn;
......
...@@ -2,8 +2,6 @@ package com.taosdata.jdbc.utils; ...@@ -2,8 +2,6 @@ package com.taosdata.jdbc.utils;
import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject; import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBError;
import org.junit.Test; import org.junit.Test;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
...@@ -11,7 +9,6 @@ import java.net.URLEncoder; ...@@ -11,7 +9,6 @@ import java.net.URLEncoder;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.sql.SQLException; import java.sql.SQLException;
import java.util.List; import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
...@@ -20,18 +17,21 @@ public class HttpClientPoolUtilTest { ...@@ -20,18 +17,21 @@ public class HttpClientPoolUtilTest {
String user = "root"; String user = "root";
String password = "taosdata"; String password = "taosdata";
String host = "127.0.0.1"; String host = "127.0.0.1";
String dbname = "log"; // String host = "master";
@Test @Test
public void test() { public void useLog() {
// given // given
List<Thread> threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> { int multi = 10;
useDB();
// try { // when
// TimeUnit.SECONDS.sleep(10); List<Thread> threads = IntStream.range(0, multi).mapToObj(i -> new Thread(() -> {
// } catch (InterruptedException e) { try {
// e.printStackTrace(); String token = login(multi);
// } executeOneSql("use log", token);
} catch (SQLException | UnsupportedEncodingException e) {
e.printStackTrace();
}
})).collect(Collectors.toList()); })).collect(Collectors.toList());
threads.forEach(Thread::start); threads.forEach(Thread::start);
...@@ -43,34 +43,62 @@ public class HttpClientPoolUtilTest { ...@@ -43,34 +43,62 @@ public class HttpClientPoolUtilTest {
e.printStackTrace(); e.printStackTrace();
} }
} }
} }
private void useDB() { @Test
try { public void tableNotExist() {
user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName()); // given
password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName()); int multi = 20;
String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + "";
String result = HttpClientPoolUtil.execute(loginUrl); // when
JSONObject jsonResult = JSON.parseObject(result); List<Thread> threads = IntStream.range(0, multi * 25).mapToObj(i -> new Thread(() -> {
String status = jsonResult.getString("status"); try {
String token = jsonResult.getString("desc"); // String token = "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04";
if (!status.equals("succ")) { String token = login(multi);
throw new SQLException(jsonResult.getString("desc")); executeOneSql("insert into log.tb_not_exist values(now, 1)", token);
executeOneSql("select last(*) from log.dn", token);
} catch (SQLException | UnsupportedEncodingException e) {
e.printStackTrace();
} }
})).collect(Collectors.toList());
String url = "http://" + host + ":6041/rest/sql"; threads.forEach(Thread::start);
String sql = "use " + dbname;
result = HttpClientPoolUtil.execute(url, sql, token);
JSONObject resultJson = JSON.parseObject(result); for (Thread thread : threads) {
if (resultJson.getString("status").equals("error")) { try {
throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); thread.join();
} catch (InterruptedException e) {
e.printStackTrace();
} }
} catch (UnsupportedEncodingException | SQLException e) {
e.printStackTrace();
} }
} }
/**
 * Log in through the RESTful endpoint and return the session token.
 *
 * @param connPoolSize size passed to HttpClientPoolUtil.init for this run
 * @return the token (the "desc" field of a successful login response)
 * @throws SQLException                 when the server reports a non-"succ" status
 * @throws UnsupportedEncodingException if UTF-8 is unavailable (never in practice)
 */
private String login(int connPoolSize) throws SQLException, UnsupportedEncodingException {
    // Encode into locals: the original mutated the shared `user`/`password`
    // fields, which re-encodes them on every call (double-encoding) and races
    // between the concurrent test threads that call login().
    String encodedUser = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName());
    String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName());
    String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + encodedUser + "/" + encodedPassword + "";
    HttpClientPoolUtil.init(connPoolSize, false);
    String result = HttpClientPoolUtil.execute(loginUrl);
    JSONObject jsonResult = JSON.parseObject(result);
    String status = jsonResult.getString("status");
    String token = jsonResult.getString("desc");
    if (!status.equals("succ")) {
        throw new SQLException(jsonResult.getString("desc"));
    }
    return token;
}
/**
 * Execute one SQL statement against the RESTful endpoint.
 *
 * @param sql   SQL text to run
 * @param token authorization token obtained from login
 * @return true when the server accepted the statement, false when the JSON
 *         response carries {@code "status":"error"}
 * @throws SQLException if the HTTP request itself fails
 */
private boolean executeOneSql(String sql, String token) throws SQLException {
    String url = "http://" + host + ":6041/rest/sql";
    String result = HttpClientPoolUtil.execute(url, sql, token);
    JSONObject resultJson = JSON.parseObject(result);
    // The REST API signals a failed statement via {"status":"error",...};
    // surface it as a boolean so callers decide whether to fail or retry.
    // (Commented-out reset/throw experiments from the original were removed.)
    return !resultJson.getString("status").equals("error");
}
} }
\ No newline at end of file
#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog
org.apache.commons.logging.simplelog.defaultlog=TRACE
org.apache.commons.logging.simplelog.showlogname=true
org.apache.commons.logging.simplelog.showShortLogname=restful
org.apache.commons.logging.simplelog.showdatetime=true
org.apache.commons.logging.simplelog.dateTimeFormat=yyyy-MM-dd HH:mm:ss.SSS
\ No newline at end of file
...@@ -5,14 +5,27 @@ ...@@ -5,14 +5,27 @@
## Install ## Install
```sh You can use `pip` to install the connector from PyPI:
git clone --depth 1 https://github.com/taosdata/TDengine.git
pip install ./TDengine/src/connector/python ```bash
pip install taospy
```
Or with git url:
```bash
pip install git+https://github.com/taosdata/taos-connector-python.git
```
If you have installed TDengine server or client with prebuilt packages, then you can install the connector from path:
```bash
pip install /usr/local/taos/connector/python
``` ```
## Source Code ## Source Code
[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python). [TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Examples ## Examples
......
[tool.poetry] [tool.poetry]
name = "taos" name = "taospy"
version = "2.1.1" version = "2.1.2"
description = "TDengine connector for python" description = "TDengine connector for python"
authors = ["Taosdata Inc. <support@taosdata.com>"] authors = ["Taosdata Inc. <support@taosdata.com>"]
license = "AGPL-3.0" license = "AGPL-3.0"
readme = "README.md" readme = "README.md"
packages = [
{include = "taos"}
]
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^2.7 || ^3.4" python = "^2.7 || ^3.4"
...@@ -12,12 +15,12 @@ typing = "*" ...@@ -12,12 +15,12 @@ typing = "*"
[tool.poetry.dev-dependencies] [tool.poetry.dev-dependencies]
pytest = [ pytest = [
{ version = "^4.6", python = "^2.7" }, { version = "^4.6", python = ">=2.7,<3.0" },
{ version = "^6.2", python = "^3.7" } { version = "^6.2", python = ">=3.7,<4.0" }
] ]
pdoc = { version = "^7.1.1", python = "^3.7" } pdoc = { version = "^7.1.1", python = "^3.7" }
mypy = { version = "^0.910", python = "^3.6" } mypy = { version = "^0.910", python = "^3.6" }
black = { version = "^21.7b0", python = "^3.6" } black = [{ version = "^21.*", python = ">=3.6.2,<4.0" }]
[build-system] [build-system]
requires = ["poetry-core>=1.0.0"] requires = ["poetry-core>=1.0.0"]
......
...@@ -442,18 +442,14 @@ from .statement import * ...@@ -442,18 +442,14 @@ from .statement import *
from .subscription import * from .subscription import *
from .schemaless import * from .schemaless import *
try: from taos._version import __version__
import importlib.metadata
__version__ = importlib.metadata.version("taos")
except:
None
# Globals # Globals
threadsafety = 0 threadsafety = 0
paramstyle = "pyformat" paramstyle = "pyformat"
__all__ = [ __all__ = [
"__version__",
# functions # functions
"connect", "connect",
"new_bind_param", "new_bind_param",
......
...@@ -2,8 +2,9 @@ ...@@ -2,8 +2,9 @@
import ctypes import ctypes
import platform import platform
import sys import inspect
from ctypes import * from ctypes import *
try: try:
from typing import Any from typing import Any
except: except:
...@@ -14,6 +15,7 @@ from .bind import * ...@@ -14,6 +15,7 @@ from .bind import *
from .field import * from .field import *
from .schemaless import * from .schemaless import *
_UNSUPPORTED = {}
# stream callback # stream callback
stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p) stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
...@@ -47,10 +49,13 @@ def _load_taos(): ...@@ -47,10 +49,13 @@ def _load_taos():
"Darwin": _load_taos_darwin, "Darwin": _load_taos_darwin,
"Windows": _load_taos_windows, "Windows": _load_taos_windows,
} }
pf = platform.system()
if load_func[pf] is None:
raise InterfaceError("unsupported platform: %s" % pf)
try: try:
return load_func[platform.system()]() return load_func[pf]()
except: except Exception as err:
raise InterfaceError('unsupported platform or failed to load taos client library') raise InterfaceError("unable to load taos C library: %s" % err)
_libtaos = _load_taos() _libtaos = _load_taos()
...@@ -65,7 +70,6 @@ _libtaos.taos_consume.restype = ctypes.c_void_p ...@@ -65,7 +70,6 @@ _libtaos.taos_consume.restype = ctypes.c_void_p
_libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int) _libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int)
_libtaos.taos_free_result.restype = None _libtaos.taos_free_result.restype = None
_libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) _libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
_libtaos.taos_schemaless_insert.restype = ctypes.c_void_p
try: try:
_libtaos.taos_stmt_errstr.restype = c_char_p _libtaos.taos_stmt_errstr.restype = c_char_p
...@@ -181,6 +185,7 @@ def taos_connect(host=None, user="root", password="taosdata", db=None, port=0): ...@@ -181,6 +185,7 @@ def taos_connect(host=None, user="root", password="taosdata", db=None, port=0):
raise ConnectionError("connect to TDengine failed") raise ConnectionError("connect to TDengine failed")
return connection return connection
_libtaos.taos_connect_auth.restype = c_void_p _libtaos.taos_connect_auth.restype = c_void_p
_libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16 _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16
...@@ -236,6 +241,7 @@ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0): ...@@ -236,6 +241,7 @@ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0):
raise ConnectionError("connect to TDengine failed") raise ConnectionError("connect to TDengine failed")
return connection return connection
_libtaos.taos_query.restype = c_void_p _libtaos.taos_query.restype = c_void_p
_libtaos.taos_query.argtypes = c_void_p, c_char_p _libtaos.taos_query.argtypes = c_void_p, c_char_p
...@@ -287,6 +293,7 @@ def taos_affected_rows(result): ...@@ -287,6 +293,7 @@ def taos_affected_rows(result):
"""The affected rows after runing query""" """The affected rows after runing query"""
return _libtaos.taos_affected_rows(result) return _libtaos.taos_affected_rows(result)
subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int) subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int)
_libtaos.taos_subscribe.restype = c_void_p _libtaos.taos_subscribe.restype = c_void_p
# _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int # _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int
...@@ -317,7 +324,7 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par ...@@ -317,7 +324,7 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par
_libtaos.taos_consume.restype = c_void_p _libtaos.taos_consume.restype = c_void_p
_libtaos.taos_consume.argstype = c_void_p, _libtaos.taos_consume.argstype = (c_void_p,)
def taos_consume(sub): def taos_consume(sub):
...@@ -503,13 +510,17 @@ def taos_stop_query(result): ...@@ -503,13 +510,17 @@ def taos_stop_query(result):
return _libtaos.taos_stop_query(result) return _libtaos.taos_stop_query(result)
_libtaos.taos_load_table_info.restype = c_int try:
_libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p) _libtaos.taos_load_table_info.restype = c_int
_libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p)
except Exception as err:
_UNSUPPORTED["taos_open_stream"] = err
def taos_load_table_info(connection, tables): def taos_load_table_info(connection, tables):
# type: (ctypes.c_void_p, str) -> None # type: (ctypes.c_void_p, str) -> None
"""Stop current query""" """Stop current query"""
_check_if_supported()
errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8"))) errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8")))
if errno != 0: if errno != 0:
msg = taos_errstr() msg = taos_errstr()
...@@ -562,12 +573,13 @@ def taos_select_db(connection, db): ...@@ -562,12 +573,13 @@ def taos_select_db(connection, db):
try: try:
_libtaos.taos_open_stream.restype = c_void_p _libtaos.taos_open_stream.restype = c_void_p
_libtaos.taos_open_stream.argstype = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, Any _libtaos.taos_open_stream.argstype = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, Any
except: except Exception as err:
pass _UNSUPPORTED["taos_open_stream"] = err
def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None): def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None):
# type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer # type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer
_check_if_supported()
if callback2 != None: if callback2 != None:
callback2 = stream_callback2_type(callback2) callback2 = stream_callback2_type(callback2)
"""Open an stream""" """Open an stream"""
...@@ -600,6 +612,7 @@ def taos_stmt_init(connection): ...@@ -600,6 +612,7 @@ def taos_stmt_init(connection):
""" """
return c_void_p(_libtaos.taos_stmt_init(connection)) return c_void_p(_libtaos.taos_stmt_init(connection))
_libtaos.taos_stmt_prepare.restype = c_int _libtaos.taos_stmt_prepare.restype = c_int
_libtaos.taos_stmt_prepare.argstype = (c_void_p, c_char_p, c_int) _libtaos.taos_stmt_prepare.argstype = (c_void_p, c_char_p, c_int)
...@@ -618,6 +631,7 @@ def taos_stmt_prepare(stmt, sql): ...@@ -618,6 +631,7 @@ def taos_stmt_prepare(stmt, sql):
_libtaos.taos_stmt_close.restype = c_int _libtaos.taos_stmt_close.restype = c_int
_libtaos.taos_stmt_close.argstype = (c_void_p,) _libtaos.taos_stmt_close.argstype = (c_void_p,)
def taos_stmt_close(stmt): def taos_stmt_close(stmt):
# type: (ctypes.c_void_p) -> None # type: (ctypes.c_void_p) -> None
"""Close a statement query """Close a statement query
...@@ -627,17 +641,12 @@ def taos_stmt_close(stmt): ...@@ -627,17 +641,12 @@ def taos_stmt_close(stmt):
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
try:
_libtaos.taos_stmt_errstr.restype = c_char_p
_libtaos.taos_stmt_errstr.argstype = (c_void_p,)
except AttributeError:
print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())
try: try:
_libtaos.taos_stmt_errstr.restype = c_char_p _libtaos.taos_stmt_errstr.restype = c_char_p
_libtaos.taos_stmt_errstr.argstype = (c_void_p,) _libtaos.taos_stmt_errstr.argstype = (c_void_p,)
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info()) _UNSUPPORTED["taos_stmt_set_tbname"] = err
def taos_stmt_errstr(stmt): def taos_stmt_errstr(stmt):
...@@ -645,16 +654,17 @@ def taos_stmt_errstr(stmt): ...@@ -645,16 +654,17 @@ def taos_stmt_errstr(stmt):
"""Get error message from stetement query """Get error message from stetement query
@stmt: c_void_p TAOS_STMT* @stmt: c_void_p TAOS_STMT*
""" """
_check_if_supported()
err = c_char_p(_libtaos.taos_stmt_errstr(stmt)) err = c_char_p(_libtaos.taos_stmt_errstr(stmt))
if err: if err:
return err.value.decode("utf-8") return err.value.decode("utf-8")
try: try:
_libtaos.taos_stmt_set_tbname.restype = c_int _libtaos.taos_stmt_set_tbname.restype = c_int
_libtaos.taos_stmt_set_tbname.argstype = (c_void_p, c_char_p) _libtaos.taos_stmt_set_tbname.argstype = (c_void_p, c_char_p)
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname" % taos_get_client_info()) _UNSUPPORTED["taos_stmt_set_tbname"] = err
def taos_stmt_set_tbname(stmt, name): def taos_stmt_set_tbname(stmt, name):
...@@ -662,15 +672,17 @@ def taos_stmt_set_tbname(stmt, name): ...@@ -662,15 +672,17 @@ def taos_stmt_set_tbname(stmt, name):
"""Set table name of a statement query if exists. """Set table name of a statement query if exists.
@stmt: c_void_p TAOS_STMT* @stmt: c_void_p TAOS_STMT*
""" """
_check_if_supported()
res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8"))) res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8")))
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
try: try:
_libtaos.taos_stmt_set_tbname_tags.restype = c_int _libtaos.taos_stmt_set_tbname_tags.restype = c_int
_libtaos.taos_stmt_set_tbname_tags.argstype = (c_void_p, c_char_p, c_void_p) _libtaos.taos_stmt_set_tbname_tags.argstype = (c_void_p, c_char_p, c_void_p)
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname_tags" % taos_get_client_info()) _UNSUPPORTED["taos_stmt_set_tbname_tags"] = err
def taos_stmt_set_tbname_tags(stmt, name, tags): def taos_stmt_set_tbname_tags(stmt, name, tags):
...@@ -678,11 +690,13 @@ def taos_stmt_set_tbname_tags(stmt, name, tags): ...@@ -678,11 +690,13 @@ def taos_stmt_set_tbname_tags(stmt, name, tags):
"""Set table name with tags bind params. """Set table name with tags bind params.
@stmt: c_void_p TAOS_STMT* @stmt: c_void_p TAOS_STMT*
""" """
_check_if_supported()
res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags) res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags)
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
_libtaos.taos_stmt_is_insert.restype = c_int _libtaos.taos_stmt_is_insert.restype = c_int
_libtaos.taos_stmt_is_insert.argstype = (c_void_p, POINTER(c_int)) _libtaos.taos_stmt_is_insert.argstype = (c_void_p, POINTER(c_int))
...@@ -702,6 +716,7 @@ def taos_stmt_is_insert(stmt): ...@@ -702,6 +716,7 @@ def taos_stmt_is_insert(stmt):
_libtaos.taos_stmt_num_params.restype = c_int _libtaos.taos_stmt_num_params.restype = c_int
_libtaos.taos_stmt_num_params.argstype = (c_void_p, POINTER(c_int)) _libtaos.taos_stmt_num_params.argstype = (c_void_p, POINTER(c_int))
def taos_stmt_num_params(stmt): def taos_stmt_num_params(stmt):
# type: (ctypes.c_void_p) -> int # type: (ctypes.c_void_p) -> int
"""Params number of the current statement query. """Params number of the current statement query.
...@@ -713,6 +728,7 @@ def taos_stmt_num_params(stmt): ...@@ -713,6 +728,7 @@ def taos_stmt_num_params(stmt):
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
return num_params.value return num_params.value
_libtaos.taos_stmt_bind_param.restype = c_int _libtaos.taos_stmt_bind_param.restype = c_int
_libtaos.taos_stmt_bind_param.argstype = (c_void_p, c_void_p) _libtaos.taos_stmt_bind_param.argstype = (c_void_p, c_void_p)
...@@ -729,12 +745,12 @@ def taos_stmt_bind_param(stmt, bind): ...@@ -729,12 +745,12 @@ def taos_stmt_bind_param(stmt, bind):
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
try: try:
_libtaos.taos_stmt_bind_param_batch.restype = c_int _libtaos.taos_stmt_bind_param_batch.restype = c_int
_libtaos.taos_stmt_bind_param_batch.argstype = (c_void_p, c_void_p) _libtaos.taos_stmt_bind_param_batch.argstype = (c_void_p, c_void_p)
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_stmt_bind_param_batch" % taos_get_client_info()) _UNSUPPORTED["taos_stmt_bind_param_batch"] = err
def taos_stmt_bind_param_batch(stmt, bind): def taos_stmt_bind_param_batch(stmt, bind):
...@@ -745,15 +761,17 @@ def taos_stmt_bind_param_batch(stmt, bind): ...@@ -745,15 +761,17 @@ def taos_stmt_bind_param_batch(stmt, bind):
""" """
# ptr = ctypes.cast(bind, POINTER(TaosMultiBind)) # ptr = ctypes.cast(bind, POINTER(TaosMultiBind))
# ptr = pointer(bind) # ptr = pointer(bind)
_check_if_supported()
res = _libtaos.taos_stmt_bind_param_batch(stmt, bind) res = _libtaos.taos_stmt_bind_param_batch(stmt, bind)
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
try: try:
_libtaos.taos_stmt_bind_single_param_batch.restype = c_int _libtaos.taos_stmt_bind_single_param_batch.restype = c_int
_libtaos.taos_stmt_bind_single_param_batch.argstype = (c_void_p, c_void_p, c_int) _libtaos.taos_stmt_bind_single_param_batch.argstype = (c_void_p, c_void_p, c_int)
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_stmt_bind_single_param_batch" % taos_get_client_info()) _UNSUPPORTED["taos_stmt_bind_single_param_batch"] = err
def taos_stmt_bind_single_param_batch(stmt, bind, col): def taos_stmt_bind_single_param_batch(stmt, bind, col):
...@@ -763,6 +781,7 @@ def taos_stmt_bind_single_param_batch(stmt, bind, col): ...@@ -763,6 +781,7 @@ def taos_stmt_bind_single_param_batch(stmt, bind, col):
@bind: TAOS_MULTI_BIND* @bind: TAOS_MULTI_BIND*
@col: column index @col: column index
""" """
_check_if_supported()
res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col) res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col)
if res != 0: if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
...@@ -810,14 +829,17 @@ def taos_stmt_use_result(stmt): ...@@ -810,14 +829,17 @@ def taos_stmt_use_result(stmt):
raise StatementError(taos_stmt_errstr(stmt)) raise StatementError(taos_stmt_errstr(stmt))
return result return result
try: try:
_libtaos.taos_schemaless_insert.restype = c_void_p _libtaos.taos_schemaless_insert.restype = c_void_p
_libtaos.taos_schemaless_insert.argstype = c_void_p, c_void_p, c_int, c_int, c_int _libtaos.taos_schemaless_insert.argstype = c_void_p, c_void_p, c_int, c_int, c_int
except AttributeError: except Exception as err:
print("WARNING: libtaos(%s) does not support taos_schemaless_insert" % taos_get_client_info()) _UNSUPPORTED["taos_schemaless_insert"] = err
def taos_schemaless_insert(connection, lines, protocol, precision): def taos_schemaless_insert(connection, lines, protocol, precision):
# type: (c_void_p, list[str] | tuple(str), SmlProtocol, SmlPrecision) -> int # type: (c_void_p, list[str] | tuple(str), SmlProtocol, SmlPrecision) -> int
_check_if_supported()
num_of_lines = len(lines) num_of_lines = len(lines)
lines = (c_char_p(line.encode("utf-8")) for line in lines) lines = (c_char_p(line.encode("utf-8")) for line in lines)
lines_type = ctypes.c_char_p * num_of_lines lines_type = ctypes.c_char_p * num_of_lines
...@@ -833,6 +855,18 @@ def taos_schemaless_insert(connection, lines, protocol, precision): ...@@ -833,6 +855,18 @@ def taos_schemaless_insert(connection, lines, protocol, precision):
taos_free_result(res) taos_free_result(res)
return affected_rows return affected_rows
def _check_if_supported():
    # Guard helper: raise if the C function that invoked this check was
    # recorded as unavailable when the client library was probed at import
    # time (the try/except blocks above populate _UNSUPPORTED).
    func = inspect.stack()[1][3]  # name of the calling wrapper function
    if func in _UNSUPPORTED:
        raise InterfaceError("C function %s is not supported in v%s: %s" % (func, taos_get_client_info(), _UNSUPPORTED[func]))
def unsupported_methods():
    # type: () -> None
    """Print every C-library function detected as unavailable at import time.

    Iterates the module-level ``_UNSUPPORTED`` mapping of function name ->
    original load error. The previous implementation called
    ``range(_UNSUPPORTED)`` — a TypeError on a dict — and passed printf-style
    arguments to ``print`` without applying the ``%`` operator, so nothing
    was ever formatted; both defects are fixed here.
    """
    for name, error in _UNSUPPORTED.items():
        print("unsupported %s: %s" % (name, error))
class CTaosInterface(object): class CTaosInterface(object):
def __init__(self, config=None): def __init__(self, config=None):
""" """
......
...@@ -3,6 +3,7 @@ PROJECT(TDengine) ...@@ -3,6 +3,7 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
...@@ -61,12 +62,22 @@ ENDIF () ...@@ -61,12 +62,22 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
IF (TD_LINUX) IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC}) ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC) IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdump taos_static cJson) IF (TD_AVRO_SUPPORT)
TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos_static)
ENDIF()
ELSE () ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos cJson) IF (TD_AVRO_SUPPORT)
TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdump taos)
ENDIF ()
ENDIF () ENDIF ()
ENDIF () ENDIF ()
...@@ -74,8 +85,8 @@ IF (TD_DARWIN) ...@@ -74,8 +85,8 @@ IF (TD_DARWIN)
# missing <argp.h> for macosx # missing <argp.h> for macosx
# ADD_EXECUTABLE(taosdump ${SRC}) # ADD_EXECUTABLE(taosdump ${SRC})
# IF (TD_SOMODE_STATIC) # IF (TD_SOMODE_STATIC)
# TARGET_LINK_LIBRARIES(taosdump taos_static cJson) # TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
# ELSE () # ELSE ()
# TARGET_LINK_LIBRARIES(taosdump taos cJson) # TARGET_LINK_LIBRARIES(taosdump taos jansson)
# ENDIF () # ENDIF ()
ENDIF () ENDIF ()
...@@ -28,15 +28,24 @@ ...@@ -28,15 +28,24 @@
#include "tsdb.h" #include "tsdb.h"
#include "tutil.h" #include "tutil.h"
#define AVRO_SUPPORT 0
#if AVRO_SUPPORT == 1 static char **g_tsDumpInSqlFiles = NULL;
static char g_tsCharset[63] = {0};
#ifdef AVRO_SUPPORT
#include <avro.h> #include <avro.h>
#endif #include <jansson.h>
static char **g_tsDumpInAvroFiles = NULL;
static void print_json_aux(json_t *element, int indent);
#endif /* AVRO_SUPPORT */
#define TSDB_SUPPORT_NANOSECOND 1 #define TSDB_SUPPORT_NANOSECOND 1
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
#define MAX_PATH_LEN 4096 // max path length on linux is 4095
#define COMMAND_SIZE 65536 #define COMMAND_SIZE 65536
#define MAX_RECORDS_PER_REQ 32766 #define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql" //#define DEFAULT_DUMP_FILE "taosdump.sql"
...@@ -46,8 +55,6 @@ ...@@ -46,8 +55,6 @@
static int converStringToReadable(char *str, int size, char *buf, int bufsize); static int converStringToReadable(char *str, int size, char *buf, int bufsize);
static int convertNCharToReadable(char *str, int size, char *buf, int bufsize); static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
static void dumpCharset(FILE *fp);
static void loadFileCharset(FILE *fp, char *fcharset);
typedef struct { typedef struct {
short bytes; short bytes;
...@@ -64,7 +71,12 @@ typedef struct { ...@@ -64,7 +71,12 @@ typedef struct {
#define performancePrint(fmt, ...) \ #define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \ do { if (g_args.performance_print) \
fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define warnPrint(fmt, ...) \
do { fprintf(stderr, "\033[33m"); \
fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
fprintf(stderr, "\033[0m"); } while(0)
#define errorPrint(fmt, ...) \ #define errorPrint(fmt, ...) \
do { fprintf(stderr, "\033[31m"); \ do { fprintf(stderr, "\033[31m"); \
...@@ -208,14 +220,13 @@ typedef struct { ...@@ -208,14 +220,13 @@ typedef struct {
typedef struct { typedef struct {
pthread_t threadID; pthread_t threadID;
int32_t threadIndex; int32_t threadIndex;
int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN]; char stbName[TSDB_TABLE_NAME_LEN];
int precision; int precision;
TAOS *taos; TAOS *taos;
int64_t rowsOfDumpOut; int64_t rowsOfDumpOut;
int64_t tablesOfDumpOut; int64_t count;
int64_t tableFrom; int64_t from;
} threadInfo; } threadInfo;
typedef struct { typedef struct {
...@@ -225,6 +236,44 @@ typedef struct { ...@@ -225,6 +236,44 @@ typedef struct {
int32_t totalDatabasesOfDumpOut; int32_t totalDatabasesOfDumpOut;
} resultStatistics; } resultStatistics;
#ifdef AVRO_SUPPORT
enum enAvro_Codec {
AVRO_CODEC_START = 0,
AVRO_CODEC_NULL = AVRO_CODEC_START,
AVRO_CODEC_DEFLATE,
AVRO_CODEC_SNAPPY,
AVRO_CODEC_LZMA,
AVRO_CODEC_UNKNOWN = 255
};
char *g_avro_codec[] = {
"null",
"deflate",
"snappy",
"lzma",
"unknown"
};
/* avro sectin begin */
#define RECORD_NAME_LEN 64
#define FIELD_NAME_LEN 64
#define TYPE_NAME_LEN 16
typedef struct FieldStruct_S {
char name[FIELD_NAME_LEN];
char type[TYPE_NAME_LEN];
} FieldStruct;
typedef struct RecordSchema_S {
char name[RECORD_NAME_LEN];
char *fields;
int num_fields;
} RecordSchema;
/* avro section end */
#endif
static int64_t g_totalDumpOutRows = 0; static int64_t g_totalDumpOutRows = 0;
SDbInfo **g_dbInfos = NULL; SDbInfo **g_dbInfos = NULL;
...@@ -276,14 +325,17 @@ static struct argp_option options[] = { ...@@ -276,14 +325,17 @@ static struct argp_option options[] = {
// dump format options // dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 2}, {"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2}, {"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
{"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, #ifdef AVRO_SUPPORT
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
{"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, #endif
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
{"debug", 'g', 0, 0, "Print debug info.", 8}, {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10},
{"debug", 'g', 0, 0, "Print debug info.", 15},
{0} {0}
}; };
...@@ -310,7 +362,10 @@ typedef struct arguments { ...@@ -310,7 +362,10 @@ typedef struct arguments {
// dump format option // dump format option
bool schemaonly; bool schemaonly;
bool with_property; bool with_property;
#ifdef AVRO_SUPPORT
bool avro; bool avro;
int avro_codec;
#endif
int64_t start_time; int64_t start_time;
char humanStartTime[HUMAN_TIME_LEN]; char humanStartTime[HUMAN_TIME_LEN];
int64_t end_time; int64_t end_time;
...@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0}; ...@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL; static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1; static int g_numOfCores = 1;
static int dumpOut();
static int dumpIn();
static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
FILE *fp);
static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
FILE *fp, char* dbName);
static int getTableDes(
char* dbName, char *table,
TableDef *stableDes, bool isSuperTable);
static int64_t dumpTableData(FILE *fp, char *tbName,
char* dbName,
int precision,
char *jsonAvroSchema);
static int checkParam();
static void freeDbInfos();
struct arguments g_args = { struct arguments g_args = {
// connection option // connection option
NULL, NULL,
...@@ -381,7 +420,10 @@ struct arguments g_args = { ...@@ -381,7 +420,10 @@ struct arguments g_args = {
// dump format option // dump format option
false, // schemaonly false, // schemaonly
true, // with_property true, // with_property
false, // avro format #ifdef AVRO_SUPPORT
false, // avro
AVRO_CODEC_SNAPPY, // avro_codec
#endif
-INT64_MAX + 1, // start_time -INT64_MAX + 1, // start_time
{0}, // humanStartTime {0}, // humanStartTime
INT64_MAX, // end_time INT64_MAX, // end_time
...@@ -392,7 +434,7 @@ struct arguments g_args = { ...@@ -392,7 +434,7 @@ struct arguments g_args = {
1, // table_batch 1, // table_batch
false, // allow_sys false, // allow_sys
// other options // other options
5, // thread_num 8, // thread_num
0, // abort 0, // abort
NULL, // arg_list NULL, // arg_list
0, // arg_list_len 0, // arg_list_len
...@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
} }
break; break;
#ifdef AVRO_SUPPORT
case 'v':
g_args.avro = true;
break;
case 'd':
for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
if (0 == strcmp(arg, g_avro_codec[i])) {
g_args.avro_codec = i;
break;
}
}
break;
#endif
case 'r': case 'r':
g_args.resultFile = arg; g_args.resultFile = arg;
break; break;
...@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'N': case 'N':
g_args.with_property = false; g_args.with_property = false;
break; break;
case 'v':
g_args.avro = true;
break;
case 'S': case 'S':
// parse time here. // parse time here.
break; break;
...@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { ...@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.abort = 1; g_args.abort = 1;
break; break;
case ARGP_KEY_ARG: case ARGP_KEY_ARG:
g_args.arg_list = &state->argv[state->next - 1]; if (strlen(state->argv[state->next - 1])) {
g_args.arg_list_len = state->argc - state->next + 1; g_args.arg_list = &state->argv[state->next - 1];
g_args.arg_list_len = state->argc - state->next + 1;
}
state->next = state->argc; state->next = state->argc;
break; break;
...@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause( ...@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause(
for (; counter < numOfCols; counter++) { for (; counter < numOfCols; counter++) {
if (counter != count_temp) { if (counter != count_temp) {
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) { if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, ", \'%s\'", pstr += sprintf(pstr, ", \'%s\'",
...@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause( ...@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause(
pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
} }
} else { } else {
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) { if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value); pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value);
...@@ -1050,1902 +1106,2768 @@ static void dumpCreateMTableClause( ...@@ -1050,1902 +1106,2768 @@ static void dumpCreateMTableClause(
free(tmpBuf); free(tmpBuf);
} }
static int convertTbDesToAvroSchema( static int64_t getNtbCountOfStb(char *dbName, char *stbName)
char *dbName, char *tbName, TableDef *tableDes, int colCount,
char **avroSchema)
{ {
errorPrint("%s() LN%d TODO: covert table schema to avro schema\n", TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
__func__, __LINE__); dbName, g_args.port);
// { if (taos == NULL) {
// "namesapce": "database name", errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
// "type": "record",
// "name": "table name",
// "fields": [
// {
// "name": "col0 name",
// "type": "long"
// },
// {
// "name": "col1 name",
// "type": ["int", "null"]
// },
// {
// "name": "col2 name",
// "type": ["float", "null"]
// },
// ...
// {
// "name": "coln name",
// "type": ["string", "null"]
// }
// ]
// }
*avroSchema = (char *)calloc(1,
17 + TSDB_DB_NAME_LEN /* dbname section */
+ 17 /* type: record */
+ 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ 10 /* fields section */
+ (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
if (*avroSchema == NULL) {
errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
return -1; return -1;
} }
char *pstr = *avroSchema; int64_t count = 0;
pstr += sprintf(pstr,
"{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [", char command[COMMAND_SIZE];
dbName, tbName);
for (int i = 0; i < colCount; i ++) { sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
if (0 == i) {
pstr += sprintf(pstr, TAOS_RES *res = taos_query(taos, command);
"{\"name\": \"%s\", \"type\": \"%s\"", int32_t code = taos_errno(res);
tableDes->cols[i].field, "long"); if (code != 0) {
} else { errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
if (strcasecmp(tableDes->cols[i].type, "binary") == 0 || __func__, __LINE__, command, taos_errstr(res));
strcasecmp(tableDes->cols[i].type, "nchar") == 0) { taos_free_result(res);
pstr += sprintf(pstr, taos_close(taos);
"{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", return -1;
tableDes->cols[i].field, "string");
} else {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
tableDes->cols[i].field, tableDes->cols[i].type);
}
}
if ((i != (colCount -1))
&& (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
pstr += sprintf(pstr, "},");
} else {
pstr += sprintf(pstr, "}");
break;
}
} }
pstr += sprintf(pstr, "]}"); TAOS_ROW row = NULL;
debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema); if ((row = taos_fetch_row(res)) != NULL) {
count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
}
return 0; taos_close(taos);
return count;
} }
static int64_t dumpNormalTable( static int getTableDes(
char *dbName, TAOS *taos,
char *stable, char* dbName, char *table,
char *tbName, TableDef *tableDes, bool isSuperTable) {
int precision, TAOS_ROW row = NULL;
FILE *fp TAOS_RES* res = NULL;
) {
int colCount = 0; int colCount = 0;
TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) char sqlstr[COMMAND_SIZE];
+ sizeof(ColDes) * TSDB_MAX_COLUMNS); sprintf(sqlstr, "describe %s.%s;", dbName, table);
if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
colCount = getTableDes(dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create child-table using super-table res = taos_query(taos, sqlstr);
dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); int32_t code = taos_errno(res);
} else { // dump table definition if (code != 0) {
colCount = getTableDes(dbName, tbName, tableDes, false); errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
__func__, __LINE__, sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
if (colCount < 0) { TAOS_FIELD *fields = taos_fetch_fields(res);
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create normal-table or super-table tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
dumpCreateTableClause(tableDes, colCount, fp, dbName); while ((row = taos_fetch_row(res)) != NULL) {
tstrncpy(tableDes->cols[colCount].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
min(TSDB_COL_NAME_LEN + 1,
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
tstrncpy(tableDes->cols[colCount].type,
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
tableDes->cols[colCount].length =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(tableDes->cols[colCount].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(COL_NOTE_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
colCount++;
} }
char *jsonAvroSchema = NULL; taos_free_result(res);
if (g_args.avro) { res = NULL;
if (0 != convertTbDesToAvroSchema(
dbName, tbName, tableDes, colCount, &jsonAvroSchema)) {
errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n",
__func__,
__LINE__);
freeTbDes(tableDes);
return -1;
}
}
int64_t ret = 0; if (isSuperTable) {
if (!g_args.schemaonly) { return colCount;
ret = dumpTableData(fp, tbName, dbName, precision,
jsonAvroSchema);
} }
tfree(jsonAvroSchema); // if child-table have tag, using select tagName from table to get tagValue
freeTbDes(tableDes); for (int i = 0 ; i < colCount; i++) {
return ret; if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
}
static int64_t dumpNormalTableBelongStb( sprintf(sqlstr, "select %s from %s.%s",
SDbInfo *dbInfo, char *stbName, char *ntbName) tableDes->cols[i].field, dbName, table);
{
int64_t count = 0;
char tmpBuf[4096] = {0}; res = taos_query(taos, sqlstr);
FILE *fp = NULL; code = taos_errno(res);
if (code != 0) {
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
__func__, __LINE__, sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
}
if (g_args.outpath[0] != 0) { fields = taos_fetch_fields(res);
sprintf(tmpBuf, "%s/%s.%s.sql",
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w"); row = taos_fetch_row(res);
if (fp == NULL) { if (NULL == row) {
errorPrint("%s() LN%d, failed to open file %s\n", errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
__func__, __LINE__, tmpBuf); __func__, __LINE__, sqlstr, taos_errstr(res));
return -1; taos_free_result(res);
} taos_close(taos);
return -1;
}
count = dumpNormalTable( if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
dbInfo->name, sprintf(tableDes->cols[i].note, "%s", "NUL");
stbName, sprintf(tableDes->cols[i].value, "%s", "NULL");
ntbName, taos_free_result(res);
getPrecisionByString(dbInfo->precision), res = NULL;
fp); continue;
}
fclose(fp); int32_t* length = taos_fetch_lengths(res);
return count;
}
static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName) //int32_t* length = taos_fetch_lengths(tmpResult);
{ switch (fields[0].type) {
int64_t count = 0; case TSDB_DATA_TYPE_BOOL:
sprintf(tableDes->cols[i].value, "%d",
char tmpBuf[4096] = {0}; ((((int32_t)(*((char *)
FILE *fp = NULL; row[TSDB_SHOW_TABLES_NAME_INDEX])))==1)
?1:0));
break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(tableDes->cols[i].value, "%d",
*((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_SMALLINT:
sprintf(tableDes->cols[i].value, "%d",
*((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_INT:
sprintf(tableDes->cols[i].value, "%d",
*((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(tableDes->cols[i].value, "%" PRId64 "",
*((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_FLOAT:
sprintf(tableDes->cols[i].value, "%f",
GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_DOUBLE:
sprintf(tableDes->cols[i].value, "%f",
GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
break;
case TSDB_DATA_TYPE_BINARY:
memset(tableDes->cols[i].value, 0,
sizeof(tableDes->cols[i].value));
int len = strlen((char *)row[0]);
// FIXME for long value
if (len < (COL_VALUEBUF_LEN - 2)) {
converStringToReadable(
(char *)row[0],
length[0],
tableDes->cols[i].value,
len);
} else {
tableDes->cols[i].var_value = calloc(1, len * 2);
if (tableDes->cols[i].var_value == NULL) {
errorPrint("%s() LN%d, memory alalocation failed!\n",
__func__, __LINE__);
taos_free_result(res);
return -1;
}
converStringToReadable((char *)row[0],
length[0],
(char *)(tableDes->cols[i].var_value), len);
}
break;
if (g_args.outpath[0] != 0) { case TSDB_DATA_TYPE_NCHAR:
sprintf(tmpBuf, "%s/%s.%s.sql", {
g_args.outpath, dbInfo->name, ntbName); memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
} else { char tbuf[COMMAND_SIZE-2]; // need reserve 2 bytes for ' '
sprintf(tmpBuf, "%s.%s.sql", convertNCharToReadable(
dbInfo->name, ntbName); (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
} length[0], tbuf, COMMAND_SIZE-2);
sprintf(tableDes->cols[i].value, "%s", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
#if 0
if (!g_args.mysqlFlag) {
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
} else {
char buf[64] = "\0";
int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
time_t tt = (time_t)(ts / 1000);
struct tm *ptm = localtime(&tt);
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
}
#endif
break;
default:
break;
}
fp = fopen(tmpBuf, "w"); taos_free_result(res);
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
} }
count = dumpNormalTable( return colCount;
dbInfo->name,
NULL,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
fclose(fp);
return count;
} }
static void *dumpNtbOfDb(void *arg) { static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
threadInfo *pThreadInfo = (threadInfo *)arg; FILE *fp, char* dbName) {
int counter = 0;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); int count_temp = 0;
debugPrint("dump table count = \t%"PRId64"\n", char sqlstr[COMMAND_SIZE];
pThreadInfo->tablesOfDumpOut);
FILE *fp = NULL; char* pstr = sqlstr;
char tmpBuf[4096] = {0};
if (g_args.outpath[0] != 0) { pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
sprintf(tmpBuf, "%s/%s.%d.sql", dbName, tableDes->name);
g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%d.sql",
pThreadInfo->dbName, pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w"); for (; counter < numOfCols; counter++) {
if (tableDes->cols[counter].note[0] != '\0') break;
if (fp == NULL) { if (counter == 0) {
errorPrint("%s() LN%d, failed to open file %s\n", pstr += sprintf(pstr, " (%s %s",
__func__, __LINE__, tmpBuf); tableDes->cols[counter].field, tableDes->cols[counter].type);
return NULL; } else {
} pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
int64_t count; if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) { || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
debugPrint("[%d] No.\t%"PRId64" table name: %s\n", pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
pThreadInfo->threadIndex, i,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name);
count = dumpNormalTable(
pThreadInfo->dbName,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable,
((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name,
pThreadInfo->precision,
fp);
if (count < 0) {
break;
} }
} }
fclose(fp); count_temp = counter;
return NULL;
}
static void *dumpNormalTablesOfStb(void *arg) {
threadInfo *pThreadInfo = (threadInfo *)arg;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut);
char command[COMMAND_SIZE];
sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", for (; counter < numOfCols; counter++) {
pThreadInfo->dbName, pThreadInfo->stbName, if (counter == count_temp) {
pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom); pstr += sprintf(pstr, ") TAGS (%s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
} else {
pstr += sprintf(pstr, ", %s %s",
tableDes->cols[counter].field, tableDes->cols[counter].type);
}
TAOS_RES *res = taos_query(pThreadInfo->taos, command); if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
int32_t code = taos_errno(res); || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
if (code) { pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", }
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return NULL;
} }
FILE *fp = NULL; pstr += sprintf(pstr, ");");
char tmpBuf[4096] = {0};
if (g_args.outpath[0] != 0) { debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
sprintf(tmpBuf, "%s/%s.%s.%d.sql", return fprintf(fp, "%s\n\n", sqlstr);
g_args.outpath, }
pThreadInfo->dbName,
pThreadInfo->stbName,
pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%s.%d.sql",
pThreadInfo->dbName,
pThreadInfo->stbName,
pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w"); static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp)
{
uint64_t sizeOfTableDes =
(uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
if (fp == NULL) { TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
errorPrint("%s() LN%d, failed to open file %s\n", if (NULL == tableDes) {
__func__, __LINE__, tmpBuf); errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
return NULL; __func__, __LINE__, sizeOfTableDes);
exit(-1);
} }
TAOS_ROW row = NULL; int colCount = getTableDes(taos, dbInfo->name,
int64_t i = 0; stbName, tableDes, true);
int64_t count;
while((row = taos_fetch_row(res)) != NULL) {
debugPrint("[%d] sub table %"PRId64": name: %s\n",
pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
count = dumpNormalTable( if (colCount < 0) {
pThreadInfo->dbName, free(tableDes);
pThreadInfo->stbName, errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX], __func__, __LINE__, stbName);
pThreadInfo->precision, exit(-1);
fp);
if (count < 0) {
break;
}
} }
fclose(fp); dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
return NULL; free(tableDes);
return 0;
} }
static int64_t dumpNtbOfDbByThreads( static int64_t dumpCreateSTableClauseOfDb(
SDbInfo *dbInfo, SDbInfo *dbInfo, FILE *fp)
int64_t ntbCount)
{ {
if (ntbCount <= 0) { TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0; return 0;
} }
int threads = g_args.thread_num; TAOS_ROW row;
char command[COMMAND_SIZE] = {0};
int64_t a = ntbCount / threads; sprintf(command, "SHOW %s.STABLES", dbInfo->name);
if (a < 1) {
threads = ntbCount;
a = 1;
}
assert(threads); TAOS_RES* res = taos_query(taos, command);
int64_t b = ntbCount % threads; int32_t code = taos_errno(res);
if (code != 0) {
threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); __func__, __LINE__, command, taos_errstr(res));
assert(pids); taos_free_result(res);
assert(infos); taos_close(taos);
exit(-1);
for (int64_t i = 0; i < threads; i++) { }
threadInfo *pThreadInfo = infos + i;
pThreadInfo->taos = taos_connect(
g_args.host,
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
return -1; int64_t superTblCnt = 0;
while ((row = taos_fetch_row(res)) != NULL) {
if (0 == dumpStableClasuse(taos, dbInfo,
row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
superTblCnt ++;
} }
pThreadInfo->threadIndex = i;
pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
pThreadInfo->tableFrom = (i==0)?0:
((threadInfo *)(infos + i - 1))->tableFrom +
((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
} }
for (int64_t i = 0; i < threads; i++) { taos_free_result(res);
pthread_join(pids[i], NULL);
}
for (int64_t i = 0; i < threads; i++) { fprintf(g_fpOfResult,
threadInfo *pThreadInfo = infos + i; "# super table counter: %"PRId64"\n",
taos_close(pThreadInfo->taos); superTblCnt);
} g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
free(pids); taos_close(taos);
free(infos);
return 0; return superTblCnt;
} }
static int64_t getNtbCountOfStb(char *dbName, char *stbName) static void dumpCreateDbClause(
{ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, char sqlstr[TSDB_MAX_SQL_LEN] = {0};
dbName, g_args.port);
if (taos == NULL) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
return -1;
}
int64_t count = 0; char *pstr = sqlstr;
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
"REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
dbInfo->replica, dbInfo->quorum, dbInfo->days,
dbInfo->keeplist,
dbInfo->cache,
dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
dbInfo->fsync,
dbInfo->cachelast,
dbInfo->comp, dbInfo->precision, dbInfo->update);
}
char command[COMMAND_SIZE]; pstr += sprintf(pstr, ";");
fprintf(fp, "%s\n\n", sqlstr);
}
sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); static FILE* openDumpInFile(char *fptr) {
wordexp_t full_path;
TAOS_RES *res = taos_query(taos, command); if (wordexp(fptr, &full_path, 0) != 0) {
int32_t code = taos_errno(res); errorPrint("illegal file name: %s\n", fptr);
if (code != 0) { return NULL;
errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
} }
TAOS_ROW row = NULL; char *fname = full_path.we_wordv[0];
if ((row = taos_fetch_row(res)) != NULL) { FILE *f = NULL;
count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; if ((fname) && (strlen(fname) > 0)) {
f = fopen(fname, "r");
if (f == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, fname);
}
} }
taos_close(taos); wordfree(&full_path);
return count; return f;
} }
static int64_t dumpNtbOfStbByThreads( static uint64_t getFilesNum(char *ext)
SDbInfo *dbInfo, char *stbName)
{ {
int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); uint64_t count = 0;
if (ntbCount <= 0) { int namelen, extlen;
return 0; struct dirent *pDirent;
} DIR *pDir;
int threads = g_args.thread_num;
int64_t a = ntbCount / threads;
if (a < 1) {
threads = ntbCount;
a = 1;
}
assert(threads); extlen = strlen(ext);
int64_t b = ntbCount % threads;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); bool isSql = (0 == strcmp(ext, "sql"));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
assert(pids);
assert(infos);
for (int64_t i = 0; i < threads; i++) { pDir = opendir(g_args.inpath);
threadInfo *pThreadInfo = infos + i; if (pDir != NULL) {
pThreadInfo->taos = taos_connect( while ((pDirent = readdir(pDir)) != NULL) {
g_args.host, namelen = strlen (pDirent->d_name);
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
return -1; if (namelen > extlen) {
if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
if (isSql) {
if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
continue;
}
}
verbosePrint("%s found\n", pDirent->d_name);
count ++;
}
}
} }
closedir (pDir);
pThreadInfo->threadIndex = i;
pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a;
pThreadInfo->tableFrom = (i==0)?0:
((threadInfo *)(infos + i - 1))->tableFrom +
((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
strcpy(pThreadInfo->stbName, stbName);
pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
}
for (int64_t i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
} }
int64_t records = 0; debugPrint("%"PRId64" .%s files found!\n", count, ext);
for (int64_t i = 0; i < threads; i++) { return count;
threadInfo *pThreadInfo = infos + i;
records += pThreadInfo->rowsOfDumpOut;
taos_close(pThreadInfo->taos);
}
free(pids);
free(infos);
return records;
} }
static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp) static void freeFileList(char **fileList, int64_t count)
{ {
uint64_t sizeOfTableDes = for (int64_t i = 0; i < count; i++) {
(uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); tfree(fileList[i]);
TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
if (NULL == tableDes) {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
__func__, __LINE__, sizeOfTableDes);
exit(-1);
}
int colCount = getTableDes(dbInfo->name,
stbName, tableDes, true);
if (colCount < 0) {
free(tableDes);
errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
__func__, __LINE__, stbName);
exit(-1);
} }
tfree(fileList);
dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
free(tableDes);
return 0;
} }
static int64_t dumpCreateSTableClauseOfDb( static void createDumpinList(char *ext, int64_t count)
SDbInfo *dbInfo, FILE *fp)
{ {
TAOS *taos = taos_connect(g_args.host, bool isSql = (0 == strcmp(ext, "sql"));
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0;
}
TAOS_ROW row;
char command[COMMAND_SIZE] = {0};
sprintf(command, "SHOW %s.STABLES", dbInfo->name);
TAOS_RES* res = taos_query(taos, command); if (isSql) {
int32_t code = taos_errno(res); g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *));
if (code != 0) { assert(g_tsDumpInSqlFiles);
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
exit(-1);
}
int64_t superTblCnt = 0; for (int64_t i = 0; i < count; i++) {
while ((row = taos_fetch_row(res)) != NULL) { g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { assert(g_tsDumpInSqlFiles[i]);
superTblCnt ++;
} }
} }
#ifdef AVRO_SUPPORT
else {
g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *));
assert(g_tsDumpInAvroFiles);
taos_free_result(res); for (int64_t i = 0; i < count; i++) {
g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
assert(g_tsDumpInAvroFiles[i]);
}
fprintf(g_fpOfResult, }
"# super table counter: %"PRId64"\n", #endif
superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
taos_close(taos); int namelen, extlen;
struct dirent *pDirent;
DIR *pDir;
extlen = strlen(ext);
count = 0;
pDir = opendir(g_args.inpath);
if (pDir != NULL) {
while ((pDirent = readdir(pDir)) != NULL) {
namelen = strlen (pDirent->d_name);
if (namelen > extlen) {
if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
verbosePrint("%s found\n", pDirent->d_name);
if (isSql) {
if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
continue;
}
strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
}
#ifdef AVRO_SUPPORT
else {
strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
}
#endif
}
}
}
closedir (pDir);
}
return superTblCnt; debugPrint("%"PRId64" .%s files filled to list!\n", count, ext);
} }
static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) #ifdef AVRO_SUPPORT
{
TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0;
}
char command[COMMAND_SIZE]; static int convertTbDesToJson(
TAOS_RES *result; char *dbName, char *tbName, TableDef *tableDes, int colCount,
int32_t code; char **jsonSchema)
{
// {
// "type": "record",
// "name": "dbname.tbname",
// "fields": [
// {
// "name": "col0 name",
// "type": "long"
// },
// {
// "name": "col1 name",
// "type": "int"
// },
// {
// "name": "col2 name",
// "type": "float"
// },
// {
// "name": "col3 name",
// "type": "boolean"
// },
// ...
// {
// "name": "coln name",
// "type": "string"
// }
// ]
// }
*jsonSchema = (char *)calloc(1,
17 + TSDB_DB_NAME_LEN /* dbname section */
+ 17 /* type: record */
+ 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ 10 /* fields section */
+ (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
if (*jsonSchema == NULL) {
errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
return -1;
}
sprintf(command, "USE %s", dbInfo->name); char *pstr = *jsonSchema;
result = taos_query(taos, command); pstr += sprintf(pstr,
code = taos_errno(result); "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [",
if (code != 0) { dbName, tbName);
errorPrint("invalid database %s, reason: %s\n", for (int i = 0; i < colCount; i ++) {
dbInfo->name, taos_errstr(result)); if (0 == i) {
taos_close(taos); pstr += sprintf(pstr,
return 0; "{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else {
if (strcasecmp(tableDes->cols[i].type, "binary") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "string");
} else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "bytes");
} else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "boolean");
} else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "int");
} else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "int");
} else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field, "long");
} else {
pstr += sprintf(pstr,
"{\"name\": \"%s\", \"type\": \"%s\"",
tableDes->cols[i].field,
strtolower(tableDes->cols[i].type, tableDes->cols[i].type));
}
}
if ((i != (colCount -1))
&& (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
pstr += sprintf(pstr, "},");
} else {
pstr += sprintf(pstr, "}");
break;
}
} }
sprintf(command, "SHOW TABLES"); pstr += sprintf(pstr, "]}");
result = taos_query(taos, command);
code = taos_errno(result); debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema);
if (code != 0) {
errorPrint("Failed to show %s\'s tables, reason: %s\n", return 0;
dbInfo->name, taos_errstr(result)); }
taos_close(taos);
return 0; static void print_json_indent(int indent) {
int i;
for (i = 0; i < indent; i++) {
putchar(' ');
} }
}
g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); const char *json_plural(size_t count) { return count == 1 ? "" : "s"; }
TAOS_ROW row; static void print_json_object(json_t *element, int indent) {
int64_t count = 0; size_t size;
while(NULL != (row = taos_fetch_row(result))) { const char *key;
debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", json_t *value;
__func__, __LINE__,
count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); print_json_indent(indent);
tstrncpy(((TableInfo *)(g_tablesList + count))->name, size = json_object_size(element);
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size));
if (stbName) { json_object_foreach(element, key, value) {
tstrncpy(((TableInfo *)(g_tablesList + count))->stable, print_json_indent(indent + 2);
(char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); printf("JSON Key: \"%s\"\n", key);
((TableInfo *)(g_tablesList + count))->belongStb = true; print_json_aux(value, indent + 2);
}
count ++;
} }
taos_close(taos); }
int64_t records = dumpNtbOfDbByThreads(dbInfo, count); static void print_json_array(json_t *element, int indent) {
size_t i;
size_t size = json_array_size(element);
print_json_indent(indent);
free(g_tablesList); printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size));
g_tablesList = NULL; for (i = 0; i < size; i++) {
print_json_aux(json_array_get(element, i), indent + 2);
}
}
return records; static void print_json_string(json_t *element, int indent) {
print_json_indent(indent);
printf("JSON String: \"%s\"\n", json_string_value(element));
} }
static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp) static void print_json_integer(json_t *element, int indent) {
{ print_json_indent(indent);
dumpCreateDbClause(dbInfo, g_args.with_property, fp); printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element));
}
fprintf(g_fpOfResult, "\n#### database: %s\n", static void print_json_real(json_t *element, int indent) {
dbInfo->name); print_json_indent(indent);
g_resultStatistics.totalDatabasesOfDumpOut++; printf("JSON Real: %f\n", json_real_value(element));
}
dumpCreateSTableClauseOfDb(dbInfo, fp); static void print_json_true(json_t *element, int indent) {
(void)element;
print_json_indent(indent);
printf("JSON True\n");
}
return dumpNTablesOfDb(dbInfo); static void print_json_false(json_t *element, int indent) {
(void)element;
print_json_indent(indent);
printf("JSON False\n");
} }
static int dumpOut() { static void print_json_null(json_t *element, int indent) {
TAOS *taos = NULL; (void)element;
TAOS_RES *result = NULL; print_json_indent(indent);
printf("JSON Null\n");
}
TAOS_ROW row; static void print_json_aux(json_t *element, int indent)
FILE *fp = NULL; {
int32_t count = 0; switch(json_typeof(element)) {
case JSON_OBJECT:
print_json_object(element, indent);
break;
char tmpBuf[4096] = {0}; case JSON_ARRAY:
if (g_args.outpath[0] != 0) { print_json_array(element, indent);
sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); break;
} else {
sprintf(tmpBuf, "dbs.sql");
}
fp = fopen(tmpBuf, "w"); case JSON_STRING:
if (fp == NULL) { print_json_string(element, indent);
errorPrint("%s() LN%d, failed to open file %s\n", break;
__func__, __LINE__, tmpBuf);
return -1; case JSON_INTEGER:
print_json_integer(element, indent);
break;
case JSON_REAL:
print_json_real(element, indent);
break;
case JSON_TRUE:
print_json_true(element, indent);
break;
case JSON_FALSE:
print_json_false(element, indent);
break;
case JSON_NULL:
print_json_null(element, indent);
break;
default:
fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element));
} }
}
g_args.dumpDbCount = getDumpDbCount(); static void print_json(json_t *root) { print_json_aux(root, 0); }
debugPrint("%s() LN%d, dump db count: %d\n",
__func__, __LINE__, g_args.dumpDbCount);
if (0 == g_args.dumpDbCount) { static json_t *load_json(char *jsonbuf)
errorPrint("%d databases valid to dump\n", g_args.dumpDbCount); {
fclose(fp); json_t *root;
return -1; json_error_t error;
root = json_loads(jsonbuf, 0, &error);
if (root) {
return root;
} else {
fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
return NULL;
} }
}
g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *)); static RecordSchema *parse_json_to_recordschema(json_t *element)
if (g_dbInfos == NULL) { {
errorPrint("%s() LN%d, failed to allocate memory\n", RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
assert(recordSchema);
if (JSON_OBJECT != json_typeof(element)) {
fprintf(stderr, "%s() LN%d, json passed is not an object\n",
__func__, __LINE__); __func__, __LINE__);
goto _exit_failure; return NULL;
} }
char command[COMMAND_SIZE]; const char *key;
json_t *value;
json_object_foreach(element, key, value) {
if (0 == strcmp(key, "name")) {
tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
} else if (0 == strcmp(key, "fields")) {
if (JSON_ARRAY == json_typeof(value)) {
size_t i;
size_t size = json_array_size(value);
verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
__func__, __LINE__,
(long long)size, json_plural(size));
recordSchema->num_fields = size;
recordSchema->fields = malloc(sizeof(FieldStruct) * size);
assert(recordSchema->fields);
for (i = 0; i < size; i++) {
FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
json_t *arr_element = json_array_get(value, i);
const char *ele_key;
json_t *ele_value;
json_object_foreach(arr_element, ele_key, ele_value) {
if (0 == strcmp(ele_key, "name")) {
tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
} else if (0 == strcmp(ele_key, "type")) {
if (JSON_STRING == json_typeof(ele_value)) {
tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
} else if (JSON_OBJECT == json_typeof(ele_value)) {
const char *obj_key;
json_t *obj_value;
json_object_foreach(ele_value, obj_key, obj_value) {
if (0 == strcmp(obj_key, "type")) {
if (JSON_STRING == json_typeof(obj_value)) {
tstrncpy(field->type,
json_string_value(obj_value), TYPE_NAME_LEN-1);
}
}
}
}
}
}
}
} else {
fprintf(stderr, "%s() LN%d, fields have no array\n",
__func__, __LINE__);
return NULL;
}
/* Connect to server */ break;
taos = taos_connect(g_args.host, g_args.user, g_args.password, }
NULL, g_args.port);
if (taos == NULL) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
goto _exit_failure;
} }
/* --------------------------------- Main Code -------------------------------- */ return recordSchema;
/* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */ }
/* */
dumpCharset(fp);
sprintf(command, "show databases"); static void freeRecordSchema(RecordSchema *recordSchema)
result = taos_query(taos, command); {
int32_t code = taos_errno(result); if (recordSchema) {
if (recordSchema->fields) {
free(recordSchema->fields);
}
free(recordSchema);
}
}
if (code != 0) { static int64_t writeResultToAvro(
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", char *avroFilename,
__func__, __LINE__, command, taos_errstr(result)); char *jsonSchema,
goto _exit_failure; TAOS_RES *res)
{
avro_schema_t schema;
if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) {
errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n",
__func__, __LINE__, jsonSchema, avro_strerror());
exit(EXIT_FAILURE);
} }
TAOS_FIELD *fields = taos_fetch_fields(result); json_t *json_root = load_json(jsonSchema);
debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
while ((row = taos_fetch_row(result)) != NULL) { RecordSchema *recordSchema;
// sys database name : 'log', but subsequent version changed to 'log' if (json_root) {
if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", if (g_args.debug_print || g_args.verbose_print) {
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) print_json(json_root);
&& (!g_args.allow_sys)) {
continue;
} }
if (g_args.databases) { // input multi dbs recordSchema = parse_json_to_recordschema(json_root);
if (inDatabasesSeq( if (NULL == recordSchema) {
(char *)row[TSDB_SHOW_DB_NAME_INDEX], fprintf(stderr, "Failed to parse json to recordschema\n");
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { exit(EXIT_FAILURE);
continue;
}
} else if (!g_args.all_databases) { // only input one db
if (strncasecmp(g_args.arg_list[0],
(char *)row[TSDB_SHOW_DB_NAME_INDEX],
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
continue;
} }
g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); json_decref(json_root);
if (g_dbInfos[count] == NULL) { } else {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema);
__func__, __LINE__, (uint64_t)sizeof(SDbInfo)); exit(EXIT_FAILURE);
goto _exit_failure; }
}
okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); avro_file_writer_t db;
tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
min(TSDB_DB_NAME_LEN,
fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
if (g_args.with_property) {
g_dbInfos[count]->ntables =
*((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
g_dbInfos[count]->vgroups =
*((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
g_dbInfos[count]->replica =
*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
g_dbInfos[count]->quorum =
*((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
g_dbInfos[count]->days =
*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
tstrncpy(g_dbInfos[count]->keeplist,
(char *)row[TSDB_SHOW_DB_KEEP_INDEX],
min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
//g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//g_dbInfos[count]->daysToKeep1;
//g_dbInfos[count]->daysToKeep2;
g_dbInfos[count]->cache =
*((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
g_dbInfos[count]->blocks =
*((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
g_dbInfos[count]->minrows =
*((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
g_dbInfos[count]->maxrows =
*((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
g_dbInfos[count]->wallevel =
*((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
g_dbInfos[count]->fsync =
*((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
g_dbInfos[count]->comp =
(int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
g_dbInfos[count]->cachelast =
(int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
tstrncpy(g_dbInfos[count]->precision,
(char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
DB_PRECISION_LEN);
g_dbInfos[count]->update =
*((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
count++;
if (g_args.databases) { int rval = avro_file_writer_create_with_codec
if (count > g_args.dumpDbCount) (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0);
break; if (rval) {
} else if (!g_args.all_databases) { errorPrint("There was an error creating %s. reason: %s\n",
if (count >= 1) avroFilename, avro_strerror());
break; exit(EXIT_FAILURE);
}
} }
if (count == 0) { TAOS_ROW row = NULL;
errorPrint("%d databases valid to dump\n", count);
goto _exit_failure;
}
taos_close(taos); int numFields = taos_field_count(res);
assert(numFields > 0);
TAOS_FIELD *fields = taos_fetch_fields(res);
if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases avro_value_iface_t *wface =
for (int i = 0; i < count; i++) { avro_generic_class_from_schema(schema);
int64_t records = 0;
records = dumpWholeDatabase(g_dbInfos[i], fp);
if (records >= 0) {
okPrint("Database %s dumped\n", g_dbInfos[i]->name);
g_totalDumpOutRows += records;
}
}
} else {
if (1 == g_args.arg_list_len) {
int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
if (records >= 0) {
okPrint("Database %s dumped\n", g_dbInfos[0]->name);
g_totalDumpOutRows += records;
}
} else {
dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
}
int superTblCnt = 0 ; avro_value_t record;
for (int i = 1; g_args.arg_list[i]; i++) { avro_generic_value_new(wface, &record);
TableRecordInfo tableRecordInfo;
if (getTableRecordInfo(g_dbInfos[0]->name, int64_t count = 0;
g_args.arg_list[i], while ((row = taos_fetch_row(res)) != NULL) {
&tableRecordInfo) < 0) { avro_value_t value;
errorPrint("input the invalid table %s\n",
g_args.arg_list[i]); for (int col = 0; col < numFields; col++) {
if (0 != avro_value_get_by_name(
&record, fields[col].name, &value, NULL)) {
errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) failed",
__func__, __LINE__, fields[col].name);
continue; continue;
} }
int64_t records = 0; int len;
if (tableRecordInfo.isStb) { // dump all table of this stable switch (fields[col].type) {
int ret = dumpStableClasuse( case TSDB_DATA_TYPE_BOOL:
g_dbInfos[0], if (NULL == row[col]) {
tableRecordInfo.tableRecord.stable, avro_value_set_int(&value, TSDB_DATA_BOOL_NULL);
fp); } else {
if (ret >= 0) { avro_value_set_boolean(&value,
superTblCnt++; ((((int32_t)(*((char *)row[col])))==1)?1:0));
records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]); }
} break;
} else if (tableRecordInfo.belongStb){
dumpStableClasuse(
g_dbInfos[0],
tableRecordInfo.tableRecord.stable,
fp);
records = dumpNormalTableBelongStb(
g_dbInfos[0],
tableRecordInfo.tableRecord.stable,
g_args.arg_list[i]);
} else {
records = dumpNormalTableWithoutStb(g_dbInfos[0], g_args.arg_list[i]);
}
if (records >= 0) { case TSDB_DATA_TYPE_TINYINT:
okPrint("table: %s dumped\n", g_args.arg_list[i]); if (NULL == row[col]) {
g_totalDumpOutRows += records; avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL);
} else {
avro_value_set_int(&value, *((int8_t *)row[col]));
}
break;
case TSDB_DATA_TYPE_SMALLINT:
if (NULL == row[col]) {
avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL);
} else {
avro_value_set_int(&value, *((int16_t *)row[col]));
}
break;
case TSDB_DATA_TYPE_INT:
if (NULL == row[col]) {
avro_value_set_int(&value, TSDB_DATA_INT_NULL);
} else {
avro_value_set_int(&value, *((int32_t *)row[col]));
}
break;
case TSDB_DATA_TYPE_BIGINT:
if (NULL == row[col]) {
avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
} else {
avro_value_set_long(&value, *((int64_t *)row[col]));
}
break;
case TSDB_DATA_TYPE_FLOAT:
if (NULL == row[col]) {
avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL);
} else {
avro_value_set_float(&value, GET_FLOAT_VAL(row[col]));
}
break;
case TSDB_DATA_TYPE_DOUBLE:
if (NULL == row[col]) {
avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL);
} else {
avro_value_set_double(&value, GET_DOUBLE_VAL(row[col]));
}
break;
case TSDB_DATA_TYPE_BINARY:
if (NULL == row[col]) {
avro_value_set_string(&value,
(char *)NULL);
} else {
avro_value_set_string(&value, (char *)row[col]);
}
break;
case TSDB_DATA_TYPE_NCHAR:
if (NULL == row[col]) {
avro_value_set_bytes(&value,
(void*)NULL,0);
} else {
len = strlen((char*)row[col]);
avro_value_set_bytes(&value, (void*)(row[col]),len);
}
break;
case TSDB_DATA_TYPE_TIMESTAMP:
if (NULL == row[col]) {
avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
} else {
avro_value_set_long(&value, *((int64_t *)row[col]));
}
break;
default:
break;
} }
} }
if (0 != avro_file_writer_append_value(db, &record)) {
errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n",
__func__, __LINE__,
avro_strerror());
} else {
count ++;
}
} }
/* Close the handle and return */ avro_value_decref(&record);
fclose(fp); avro_value_iface_decref(wface);
taos_free_result(result); freeRecordSchema(recordSchema);
freeDbInfos(); avro_file_writer_close(db);
fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); avro_schema_decref(schema);
return 0;
_exit_failure: return count;
fclose(fp);
taos_close(taos);
taos_free_result(result);
freeDbInfos();
errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
return -1;
} }
static int getTableDes( void freeBindArray(char *bindArray, int onlyCol)
char* dbName, char *table, {
TableDef *tableDes, bool isSuperTable) { TAOS_BIND *bind;
TAOS_ROW row = NULL;
TAOS_RES* res = NULL;
int colCount = 0;
TAOS *taos = taos_connect(g_args.host, for (int j = 0; j < onlyCol; j++) {
g_args.user, g_args.password, dbName, g_args.port); bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j));
if (NULL == taos) { if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type)
errorPrint( && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) {
"Failed to connect to TDengine server %s by specified database %s\n", tfree(bind->buffer);
g_args.host, dbName); }
}
}
static int dumpInOneAvroFile(char* fcharset,
char* encode, char *avroFilepath)
{
debugPrint("avroFilepath: %s\n", avroFilepath);
avro_file_reader_t reader;
if(avro_file_reader(avroFilepath, &reader)) {
fprintf(stderr, "Unable to open avro file %s: %s\n",
avroFilepath, avro_strerror());
return -1; return -1;
} }
char sqlstr[COMMAND_SIZE]; int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4;
sprintf(sqlstr, "describe %s.%s;", dbName, table); char *jsonbuf = calloc(1, buf_len);
assert(jsonbuf);
res = taos_query(taos, sqlstr); avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);;
int32_t code = taos_errno(res);
if (code != 0) { avro_schema_t schema;
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", schema = avro_file_reader_get_writer_schema(reader);
__func__, __LINE__, sqlstr, taos_errstr(res)); avro_schema_to_json(schema, jsonwriter);
taos_free_result(res);
taos_close(taos); if (0 == strlen(jsonbuf)) {
errorPrint("Failed to parse avro file: %s schema. reason: %s\n",
avroFilepath, avro_strerror());
avro_schema_decref(schema);
avro_file_reader_close(reader);
avro_writer_free(jsonwriter);
return -1; return -1;
} }
debugPrint("Schema:\n %s\n", jsonbuf);
TAOS_FIELD *fields = taos_fetch_fields(res); json_t *json_root = load_json(jsonbuf);
debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
if (g_args.debug_print) {
print_json(json_root);
}
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); const char *namespace = avro_schema_namespace((const avro_schema_t)schema);
while ((row = taos_fetch_row(res)) != NULL) { debugPrint("Namespace: %s\n", namespace);
tstrncpy(tableDes->cols[colCount].field,
(char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
min(TSDB_COL_NAME_LEN + 1, namespace, g_args.port);
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); if (taos == NULL) {
tstrncpy(tableDes->cols[colCount].type, errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
(char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], return -1;
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
tableDes->cols[colCount].length =
*((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
tstrncpy(tableDes->cols[colCount].note,
(char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
min(COL_NOTE_LEN,
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
colCount++;
} }
taos_free_result(res); TAOS_STMT *stmt = taos_stmt_init(taos);
res = NULL; if (NULL == stmt) {
taos_close(taos);
errorPrint("%s() LN%d, stmt init failed! reason: %s\n",
__func__, __LINE__, taos_errstr(NULL));
return -1;
}
if (isSuperTable) { RecordSchema *recordSchema = parse_json_to_recordschema(json_root);
return colCount; if (NULL == recordSchema) {
errorPrint("Failed to parse json to recordschema. reason: %s\n",
avro_strerror());
avro_schema_decref(schema);
avro_file_reader_close(reader);
avro_writer_free(jsonwriter);
return -1;
} }
json_decref(json_root);
// if child-table have tag, using select tagName from table to get tagValue TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
for (int i = 0 ; i < colCount; i++) { + sizeof(ColDes) * TSDB_MAX_COLUMNS);
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false);
tableDes->cols[i].field, dbName, table);
res = taos_query(taos, sqlstr); if (allColCount < 0) {
code = taos_errno(res); errorPrint("%s() LN%d, failed to get table[%s] schema\n",
if (code != 0) { __func__,
errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", __LINE__,
__func__, __LINE__, sqlstr, taos_errstr(res)); recordSchema->name);
taos_free_result(res); free(tableDes);
taos_close(taos); freeRecordSchema(recordSchema);
return -1; avro_schema_decref(schema);
} avro_file_reader_close(reader);
avro_writer_free(jsonwriter);
return -1;
}
fields = taos_fetch_fields(res); char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
assert(stmtBuffer);
char *pstr = stmtBuffer;
pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");
row = taos_fetch_row(res); int onlyCol = 1; // at least timestamp
if (NULL == row) { for (int col = 1; col < allColCount; col++) {
errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue;
__func__, __LINE__, sqlstr, taos_errstr(res)); pstr += sprintf(pstr, ",?");
taos_free_result(res); onlyCol ++;
taos_close(taos); }
return -1; pstr += sprintf(pstr, ")");
}
if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) {
sprintf(tableDes->cols[i].note, "%s", "NUL"); errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n",
sprintf(tableDes->cols[i].value, "%s", "NULL"); taos_stmt_errstr(stmt));
taos_free_result(res);
res = NULL;
continue;
}
int32_t* length = taos_fetch_lengths(res); free(stmtBuffer);
free(tableDes);
freeRecordSchema(recordSchema);
avro_schema_decref(schema);
avro_file_reader_close(reader);
avro_writer_free(jsonwriter);
return -1;
}
//int32_t* length = taos_fetch_lengths(tmpResult); if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) {
switch (fields[0].type) { errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n",
case TSDB_DATA_TYPE_BOOL: recordSchema->name, taos_stmt_errstr(stmt));
sprintf(tableDes->cols[i].value, "%d",
((((int32_t)(*((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]))) == 1) ? 1 : 0)); free(stmtBuffer);
break; free(tableDes);
case TSDB_DATA_TYPE_TINYINT: avro_schema_decref(schema);
sprintf(tableDes->cols[i].value, "%d", avro_file_reader_close(reader);
*((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); avro_writer_free(jsonwriter);
break; return -1;
case TSDB_DATA_TYPE_SMALLINT: }
sprintf(tableDes->cols[i].value, "%d",
*((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); avro_value_iface_t *value_class = avro_generic_class_from_schema(schema);
break; avro_value_t value;
case TSDB_DATA_TYPE_INT: avro_generic_value_new(value_class, &value);
sprintf(tableDes->cols[i].value, "%d",
*((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); char *bindArray =
break; malloc(sizeof(TAOS_BIND) * onlyCol);
case TSDB_DATA_TYPE_BIGINT: assert(bindArray);
sprintf(tableDes->cols[i].value, "%" PRId64 "",
*((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); int success = 0;
break; int failed = 0;
case TSDB_DATA_TYPE_FLOAT: while(!avro_file_reader_read_value(reader, &value)) {
sprintf(tableDes->cols[i].value, "%f", memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol);
GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); TAOS_BIND *bind;
break;
case TSDB_DATA_TYPE_DOUBLE: for (int i = 0; i < recordSchema->num_fields; i++) {
sprintf(tableDes->cols[i].value, "%f", bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));
GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
break; avro_value_t field_value;
case TSDB_DATA_TYPE_BINARY:
memset(tableDes->cols[i].value, 0, FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
sizeof(tableDes->cols[i].value));
int len = strlen((char *)row[0]); bind->is_null = NULL;
// FIXME for long value int is_null = 1;
if (len < (COL_VALUEBUF_LEN - 2)) { if (0 == i) {
converStringToReadable( int64_t *ts = malloc(sizeof(int64_t));
(char *)row[0], assert(ts);
length[0],
tableDes->cols[i].value, avro_value_get_by_name(&value, field->name, &field_value, NULL);
len); avro_value_get_long(&field_value, ts);
} else {
tableDes->cols[i].var_value = calloc(1, len * 2); bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
if (tableDes->cols[i].var_value == NULL) { bind->buffer_length = sizeof(int64_t);
errorPrint("%s() LN%d, memory alalocation failed!\n", bind->buffer = ts;
__func__, __LINE__); bind->length = &bind->buffer_length;
taos_free_result(res); } else if (0 == avro_value_get_by_name(
return -1; &value, field->name, &field_value, NULL)) {
if (0 == strcasecmp(tableDes->cols[i].type, "int")) {
int32_t *n32 = malloc(sizeof(int32_t));
assert(n32);
avro_value_get_int(&field_value, n32);
debugPrint("%d | ", *n32);
bind->buffer_type = TSDB_DATA_TYPE_INT;
bind->buffer_length = sizeof(int32_t);
bind->buffer = n32;
} else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) {
int32_t *n8 = malloc(sizeof(int32_t));
assert(n8);
avro_value_get_int(&field_value, n8);
debugPrint("%d | ", *n8);
bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
bind->buffer_length = sizeof(int8_t);
bind->buffer = (int8_t *)n8;
} else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) {
int32_t *n16 = malloc(sizeof(int32_t));
assert(n16);
avro_value_get_int(&field_value, n16);
debugPrint("%d | ", *n16);
bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
bind->buffer_length = sizeof(int16_t);
bind->buffer = (int32_t*)n16;
} else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) {
int64_t *n64 = malloc(sizeof(int64_t));
assert(n64);
avro_value_get_long(&field_value, n64);
debugPrint("%"PRId64" | ", *n64);
bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
bind->buffer_length = sizeof(int64_t);
bind->buffer = n64;
} else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) {
int64_t *n64 = malloc(sizeof(int64_t));
assert(n64);
avro_value_get_long(&field_value, n64);
debugPrint("%"PRId64" | ", *n64);
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
bind->buffer_length = sizeof(int64_t);
bind->buffer = n64;
} else if (0 == strcasecmp(tableDes->cols[i].type, "float")) {
float *f = malloc(sizeof(float));
assert(f);
avro_value_get_float(&field_value, f);
if (TSDB_DATA_FLOAT_NULL == *f) {
debugPrint("%s | ", "NULL");
bind->is_null = &is_null;
} else {
debugPrint("%f | ", *f);
bind->buffer = f;
} }
converStringToReadable((char *)row[0], bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
length[0], bind->buffer_length = sizeof(float);
(char *)(tableDes->cols[i].var_value), len); } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) {
double *dbl = malloc(sizeof(double));
assert(dbl);
avro_value_get_double(&field_value, dbl);
if (TSDB_DATA_DOUBLE_NULL == *dbl) {
debugPrint("%s | ", "NULL");
bind->is_null = &is_null;
} else {
debugPrint("%f | ", *dbl);
bind->buffer = dbl;
}
bind->buffer = dbl;
bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
bind->buffer_length = sizeof(double);
} else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
size_t size;
char *buf = NULL;
avro_value_get_string(&field_value, (const char **)&buf, &size);
debugPrint("%s | ", (char *)buf);
bind->buffer_type = TSDB_DATA_TYPE_BINARY;
bind->buffer_length = tableDes->cols[i].length;
bind->buffer = buf;
} else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
size_t bytessize;
void *bytesbuf = NULL;
avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
debugPrint("%s | ", (char*)bytesbuf);
bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
bind->buffer_length = tableDes->cols[i].length;
bind->buffer = bytesbuf;
} else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
int32_t *bl = malloc(sizeof(int32_t));
assert(bl);
avro_value_get_boolean(&field_value, bl);
debugPrint("%s | ", (*bl)?"true":"false");
bind->buffer_type = TSDB_DATA_TYPE_BOOL;
bind->buffer_length = sizeof(int8_t);
bind->buffer = (int8_t*)bl;
} }
break;
case TSDB_DATA_TYPE_NCHAR: bind->length = &bind->buffer_length;
{ }
memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note));
char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
convertNCharToReadable((char *)row[TSDB_SHOW_TABLES_NAME_INDEX], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].value, "%s", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
#if 0
if (!g_args.mysqlFlag) {
sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
} else {
char buf[64] = "\0";
int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
time_t tt = (time_t)(ts / 1000);
struct tm *ptm = localtime(&tt);
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000));
}
#endif
break;
default:
break;
} }
debugPrint("%s", "\n");
taos_free_result(res); if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
freeBindArray(bindArray, onlyCol);
failed --;
continue;
}
if (0 != taos_stmt_add_batch(stmt)) {
errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
freeBindArray(bindArray, onlyCol);
failed --;
continue;
}
freeBindArray(bindArray, onlyCol);
success ++;
continue;
}
if (0 != taos_stmt_execute(stmt)) {
errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
failed = success;
} }
avro_value_decref(&value);
avro_value_iface_decref(value_class);
tfree(bindArray);
tfree(stmtBuffer);
tfree(tableDes);
freeRecordSchema(recordSchema);
avro_schema_decref(schema);
avro_file_reader_close(reader);
avro_writer_free(jsonwriter);
tfree(jsonbuf);
taos_stmt_close(stmt);
taos_close(taos); taos_close(taos);
return colCount;
if (failed < 0)
return failed;
return success;
} }
static void dumpCreateDbClause( static void* dumpInAvroWorkThreadFp(void *arg)
SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { {
char sqlstr[TSDB_MAX_SQL_LEN] = {0}; threadInfo *pThread = (threadInfo*)arg;
setThreadName("dumpInAvroWorkThrd");
verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
pThread->threadIndex, pThread->count, pThread->from);
for (int64_t i = 0; i < pThread->count; i++) {
char avroFile[MAX_PATH_LEN];
sprintf(avroFile, "%s/%s", g_args.inpath,
g_tsDumpInAvroFiles[pThread->from + i]);
if (0 == dumpInOneAvroFile(g_tsCharset,
g_args.encode,
avroFile)) {
okPrint("[%d] Success dump in file: %s\n",
pThread->threadIndex, avroFile);
}
}
char *pstr = sqlstr; return NULL;
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); }
if (isDumpProperty) {
pstr += sprintf(pstr, static int64_t dumpInAvroWorkThreads()
"REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", {
dbInfo->replica, dbInfo->quorum, dbInfo->days, int64_t ret = 0;
dbInfo->keeplist,
dbInfo->cache, int32_t threads = g_args.thread_num;
dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
dbInfo->fsync, uint64_t avroFileCount = getFilesNum("avro");
dbInfo->cachelast, if (0 == avroFileCount) {
dbInfo->comp, dbInfo->precision, dbInfo->update); debugPrint("No .avro file found in %s\n", g_args.inpath);
return 0;
} }
pstr += sprintf(pstr, ";"); createDumpinList("avro", avroFileCount);
fprintf(fp, "%s\n\n", sqlstr);
threadInfo *pThread;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = (threadInfo *)calloc(
threads, sizeof(threadInfo));
assert(pids);
assert(infos);
int64_t a = avroFileCount / threads;
if (a < 1) {
threads = avroFileCount;
a = 1;
}
int64_t b = 0;
if (threads != 0) {
b = avroFileCount % threads;
}
int64_t from = 0;
for (int32_t t = 0; t < threads; ++t) {
pThread = infos + t;
pThread->threadIndex = t;
pThread->from = from;
pThread->count = t<b?a+1:a;
from += pThread->count;
verbosePrint(
"Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
t, pThread->count, pThread->from);
if (pthread_create(pids + t, NULL,
dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
errorPrint("%s() LN%d, thread[%d] failed to start\n",
__func__, __LINE__, pThread->threadIndex);
exit(EXIT_FAILURE);
}
}
for (int t = 0; t < threads; ++t) {
pthread_join(pids[t], NULL);
}
free(infos);
free(pids);
freeFileList(g_tsDumpInAvroFiles, avroFileCount);
return ret;
} }
static int dumpCreateTableClause(TableDef *tableDes, int numOfCols, #endif /* AVRO_SUPPORT */
FILE *fp, char* dbName) {
int counter = 0;
int count_temp = 0;
char sqlstr[COMMAND_SIZE];
char* pstr = sqlstr; static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
{
int64_t totalRows = 0;
pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", int32_t sql_buf_len = g_args.max_sql_len;
dbName, tableDes->name); char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
assert(tmpBuffer);
for (; counter < numOfCols; counter++) { char *pstr = tmpBuffer;
if (tableDes->cols[counter].note[0] != '\0') break;
if (counter == 0) { TAOS_ROW row = NULL;
pstr += sprintf(pstr, " (%s %s", int rowFlag = 0;
tableDes->cols[counter].field, tableDes->cols[counter].type); int64_t lastRowsPrint = 5000000;
int count = 0;
int numFields = taos_field_count(res);
assert(numFields > 0);
TAOS_FIELD *fields = taos_fetch_fields(res);
int32_t curr_sqlstr_len = 0;
int32_t total_sqlstr_len = 0;
while ((row = taos_fetch_row(res)) != NULL) {
curr_sqlstr_len = 0;
int32_t* length = taos_fetch_lengths(res); // act len
if (count == 0) {
total_sqlstr_len = 0;
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"INSERT INTO %s.%s VALUES (", dbName, tbName);
} else { } else {
pstr += sprintf(pstr, ", %s %s", if (g_args.mysqlFlag) {
tableDes->cols[counter].field, tableDes->cols[counter].type); if (0 == rowFlag) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
rowFlag++;
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
}
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
}
} }
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || for (int col = 0; col < numFields; col++) {
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
} if (row[col] == NULL) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
continue;
}
switch (fields[col].type) {
case TSDB_DATA_TYPE_BOOL:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
((((int32_t)(*((char *)row[col])))==1)?1:0));
break;
case TSDB_DATA_TYPE_TINYINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
*((int8_t *)row[col]));
break;
case TSDB_DATA_TYPE_SMALLINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
*((int16_t *)row[col]));
break;
case TSDB_DATA_TYPE_INT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
*((int32_t *)row[col]));
break;
case TSDB_DATA_TYPE_BIGINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"%" PRId64 "",
*((int64_t *)row[col]));
break;
case TSDB_DATA_TYPE_FLOAT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
GET_FLOAT_VAL(row[col]));
break;
case TSDB_DATA_TYPE_DOUBLE:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
GET_DOUBLE_VAL(row[col]));
break;
case TSDB_DATA_TYPE_BINARY:
{
char tbuf[COMMAND_SIZE] = {0};
converStringToReadable((char *)row[col], length[col],
tbuf, COMMAND_SIZE);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR:
{
char tbuf[COMMAND_SIZE] = {0};
convertNCharToReadable((char *)row[col], length[col],
tbuf, COMMAND_SIZE);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
if (!g_args.mysqlFlag) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"%" PRId64 "",
*(int64_t *)row[col]);
} else {
char buf[64] = "\0";
int64_t ts = *((int64_t *)row[col]);
time_t tt = (time_t)(ts / 1000);
struct tm *ptm = localtime(&tt);
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
"\'%s.%03d\'",
buf, (int)(ts % 1000));
}
break;
default:
break;
}
}
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
totalRows, dbName, tbName);
lastRowsPrint += 5000000;
}
total_sqlstr_len += curr_sqlstr_len;
if ((count >= g_args.data_batch)
|| (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
fprintf(fp, ";\n");
count = 0;
}
}
debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
fprintf(fp, "\n");
free(tmpBuffer);
return totalRows;
}
static int64_t dumpTableData(FILE *fp, char *tbName,
char* dbName, int precision,
char *jsonSchema) {
int64_t totalRows = 0;
char sqlstr[1024] = {0};
int64_t start_time, end_time;
if (strlen(g_args.humanStartTime)) {
if (TSDB_CODE_SUCCESS != taosParseTime(
g_args.humanStartTime, &start_time,
strlen(g_args.humanStartTime),
precision, 0)) {
errorPrint("Input %s, time format error!\n",
g_args.humanStartTime);
return -1;
}
} else {
start_time = g_args.start_time;
}
if (strlen(g_args.humanEndTime)) {
if (TSDB_CODE_SUCCESS != taosParseTime(
g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
precision, 0)) {
errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
return -1;
}
} else {
end_time = g_args.end_time;
}
sprintf(sqlstr,
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbName, start_time, end_time);
TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbName, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbName);
return -1;
}
TAOS_RES* res = taos_query(taos, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
errorPrint("failed to run command %s, reason: %s\n",
sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
}
#ifdef AVRO_SUPPORT
if (g_args.avro) {
char avroFilename[MAX_PATH_LEN] = {0};
if (g_args.outpath[0] != 0) {
sprintf(avroFilename, "%s/%s.%s.avro",
g_args.outpath, dbName, tbName);
} else {
sprintf(avroFilename, "%s.%s.avro",
dbName, tbName);
}
totalRows = writeResultToAvro(avroFilename, jsonSchema, res);
} else
#endif
totalRows = writeResultToSql(res, fp, dbName, tbName);
taos_free_result(res);
taos_close(taos);
return totalRows;
}
static int64_t dumpNormalTable(
TAOS *taos,
char *dbName,
char *stable,
char *tbName,
int precision,
FILE *fp
) {
int colCount = 0;
TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
+ sizeof(ColDes) * TSDB_MAX_COLUMNS);
if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
colCount = getTableDes(taos, dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create child-table using super-table
dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
} else { // dump table definition
colCount = getTableDes(taos, dbName, tbName, tableDes, false);
if (colCount < 0) {
errorPrint("%s() LN%d, failed to get table[%s] schema\n",
__func__,
__LINE__,
tbName);
free(tableDes);
return -1;
}
// create normal-table or super-table
dumpCreateTableClause(tableDes, colCount, fp, dbName);
}
char *jsonSchema = NULL;
#ifdef AVRO_SUPPORT
if (g_args.avro) {
if (0 != convertTbDesToJson(
dbName, tbName, tableDes, colCount, &jsonSchema)) {
errorPrint("%s() LN%d, convertTbDesToJson failed\n",
__func__,
__LINE__);
freeTbDes(tableDes);
return -1;
}
}
#endif
int64_t totalRows = 0;
if (!g_args.schemaonly) {
totalRows = dumpTableData(fp, tbName, dbName, precision,
jsonSchema);
}
tfree(jsonSchema);
freeTbDes(tableDes);
return totalRows;
}
static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName)
{
int64_t count = 0;
char tmpBuf[MAX_PATH_LEN] = {0};
FILE *fp = NULL;
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%s.sql",
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
}
count = dumpNormalTable(
taos,
dbInfo->name,
NULL,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
if (count > 0) {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
fclose(fp);
return count;
}
static int64_t dumpNormalTableBelongStb(
TAOS *taos,
SDbInfo *dbInfo, char *stbName, char *ntbName)
{
int64_t count = 0;
char tmpBuf[MAX_PATH_LEN] = {0};
FILE *fp = NULL;
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%s.sql",
g_args.outpath, dbInfo->name, ntbName);
} else {
sprintf(tmpBuf, "%s.%s.sql",
dbInfo->name, ntbName);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
}
count = dumpNormalTable(
taos,
dbInfo->name,
stbName,
ntbName,
getPrecisionByString(dbInfo->precision),
fp);
if (count > 0) {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
fclose(fp);
return count;
}
static void *dumpNtbOfDb(void *arg) {
threadInfo *pThreadInfo = (threadInfo *)arg;
debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
debugPrint("dump table count = \t%"PRId64"\n",
pThreadInfo->count);
FILE *fp = NULL;
char tmpBuf[MAX_PATH_LEN] = {0};
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.%d.sql",
g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
} else {
sprintf(tmpBuf, "%s.%d.sql",
pThreadInfo->dbName, pThreadInfo->threadIndex);
}
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return NULL;
}
int64_t count;
for (int64_t i = 0; i < pThreadInfo->count; i++) {
debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
pThreadInfo->threadIndex, i,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name);
count = dumpNormalTable(
pThreadInfo->taos,
pThreadInfo->dbName,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable,
((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name,
pThreadInfo->precision,
fp);
if (count < 0) {
break;
} else {
atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
}
fclose(fp);
return NULL;
}
static int checkParam() {
if (g_args.all_databases && g_args.databases) {
errorPrint("%s", "conflict option --all-databases and --databases\n");
return -1;
}
if (g_args.start_time > g_args.end_time) {
errorPrint("%s", "start time is larger than end time\n");
return -1;
}
if (g_args.arg_list_len == 0) {
if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
errorPrint("%s", "taosdump requires parameters\n");
return -1;
}
}
/*
if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
fprintf(stderr, "duplicate parameter input and output file path\n");
return -1;
}
*/
if (!g_args.isDumpIn && g_args.encode != NULL) {
fprintf(stderr, "invalid option in dump out\n");
return -1;
}
if (g_args.table_batch <= 0) {
fprintf(stderr, "invalid option in dump out\n");
return -1;
}
return 0;
}
/*
static bool isEmptyCommand(char *cmd) {
char *pchar = cmd;
while (*pchar != '\0') {
if (*pchar != ' ') return false;
pchar++;
}
return true;
}
static void taosReplaceCtrlChar(char *str) {
bool ctrlOn = false;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) {
if (ctrlOn) {
switch (*str) {
case 'n':
*pstr = '\n';
pstr++;
break;
case 'r':
*pstr = '\r';
pstr++;
break;
case 't':
*pstr = '\t';
pstr++;
break;
case '\\':
*pstr = '\\';
pstr++;
break;
case '\'':
*pstr = '\'';
pstr++;
break;
default:
break;
}
ctrlOn = false;
} else {
if (*str == '\\') {
ctrlOn = true;
} else {
*pstr = *str;
pstr++;
}
}
}
*pstr = '\0';
}
*/
char *ascii_literal_list[] = {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
"\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
"\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
"\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
"4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
"u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
"\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
"\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
"\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
"\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
"\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
"\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
"\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
"\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
char *pstr = str;
char *pbuf = buf;
while (size > 0) {
if (*pstr == '\0') break;
pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
pstr++;
size--;
} }
*pbuf = '\0';
return 0;
}
count_temp = counter; static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
char *pstr = str;
for (; counter < numOfCols; counter++) { char *pbuf = buf;
if (counter == count_temp) { wchar_t wc;
pstr += sprintf(pstr, ") TAGS (%s %s", while (size > 0) {
tableDes->cols[counter].field, tableDes->cols[counter].type); if (*pstr == '\0') break;
} else { int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
pstr += sprintf(pstr, ", %s %s", if (byte_width < 0) {
tableDes->cols[counter].field, tableDes->cols[counter].type); errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
exit(-1);
} }
if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || if ((int)wc < 256) {
strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); } else {
memcpy(pbuf, pstr, byte_width);
pbuf += byte_width;
} }
pstr += byte_width;
} }
pstr += sprintf(pstr, ");"); *pbuf = '\0';
debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
return fprintf(fp, "%s\n\n", sqlstr);
}
static int writeSchemaToAvro(char *jsonAvroSchema)
{
errorPrint("%s() LN%d, TODO: implement write schema to avro",
__func__, __LINE__);
return 0; return 0;
} }
static int64_t writeResultToAvro(TAOS_RES *res) static void dumpCharset(FILE *fp) {
{ char charsetline[256];
errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__);
return 0; (void)fseek(fp, 0, SEEK_SET);
sprintf(charsetline, "#!%s\n", tsCharset);
(void)fwrite(charsetline, strlen(charsetline), 1, fp);
} }
static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName) static void loadFileCharset(FILE *fp, char *fcharset) {
{ char * line = NULL;
int64_t totalRows = 0; size_t line_size = 0;
int32_t sql_buf_len = g_args.max_sql_len; (void)fseek(fp, 0, SEEK_SET);
char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); ssize_t size = getline(&line, &line_size, fp);
if (tmpBuffer == NULL) { if (size <= 2) {
errorPrint("failed to allocate %d memory\n", sql_buf_len + 128); goto _exit_no_charset;
return -1;
} }
char *pstr = tmpBuffer; if (strncmp(line, "#!", 2) != 0) {
goto _exit_no_charset;
TAOS_ROW row = NULL; }
int numFields = 0; if (line[size - 1] == '\n') {
int rowFlag = 0; line[size - 1] = '\0';
int64_t lastRowsPrint = 5000000; size--;
int count = 0; }
strcpy(fcharset, line + 2);
numFields = taos_field_count(res); tfree(line);
assert(numFields > 0); return;
TAOS_FIELD *fields = taos_fetch_fields(res);
int32_t curr_sqlstr_len = 0; _exit_no_charset:
int32_t total_sqlstr_len = 0; (void)fseek(fp, 0, SEEK_SET);
*fcharset = '\0';
tfree(line);
return;
}
while ((row = taos_fetch_row(res)) != NULL) { // ======== dumpIn support multi threads functions ================================//
curr_sqlstr_len = 0;
int32_t* length = taos_fetch_lengths(res); // act len static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset,
char* encode, char* fileName) {
int read_len = 0;
char * cmd = NULL;
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;
if (count == 0) { cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
total_sqlstr_len = 0; if (cmd == NULL) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, errorPrint("%s() LN%d, failed to allocate memory\n",
"INSERT INTO %s.%s VALUES (", dbName, tbName); __func__, __LINE__);
} else { return -1;
if (g_args.mysqlFlag) { }
if (0 == rowFlag) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
rowFlag++;
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
}
} else {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
}
}
for (int col = 0; col < numFields; col++) { int lastRowsPrint = 5000000;
if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); int lineNo = 0;
while ((read_len = getline(&line, &line_len, fp)) != -1) {
++lineNo;
if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
line[--read_len] = '\0';
if (row[col] == NULL) { //if (read_len == 0 || isCommentLine(line)) { // line starts with #
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); if (read_len == 0 ) {
continue; continue;
} }
switch (fields[col].type) { if (line[read_len - 1] == '\\') {
case TSDB_DATA_TYPE_BOOL: line[read_len - 1] = ' ';
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", memcpy(cmd + cmd_len, line, read_len);
((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); cmd_len += read_len;
break; continue;
case TSDB_DATA_TYPE_TINYINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
break;
case TSDB_DATA_TYPE_SMALLINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
break;
case TSDB_DATA_TYPE_INT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
break;
case TSDB_DATA_TYPE_BIGINT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
*((int64_t *)row[col]));
break;
case TSDB_DATA_TYPE_FLOAT:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
break;
case TSDB_DATA_TYPE_DOUBLE:
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
break;
case TSDB_DATA_TYPE_BINARY:
{
char tbuf[COMMAND_SIZE] = {0};
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR:
{
char tbuf[COMMAND_SIZE] = {0};
convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
if (!g_args.mysqlFlag) {
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
*(int64_t *)row[col]);
} else {
char buf[64] = "\0";
int64_t ts = *((int64_t *)row[col]);
time_t tt = (time_t)(ts / 1000);
struct tm *ptm = localtime(&tt);
strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
buf, (int)(ts % 1000));
}
break;
default:
break;
}
} }
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); memcpy(cmd + cmd_len, line, read_len);
cmd[read_len + cmd_len]= '\0';
if (queryDbImpl(taos, cmd)) {
errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
__func__, __LINE__, lineNo, fileName);
fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
}
totalRows++; memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
count++; cmd_len = 0;
fprintf(fp, "%s", tmpBuffer);
if (totalRows >= lastRowsPrint) { if (lineNo >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", printf(" %d lines already be executed from file %s\n", lineNo, fileName);
totalRows, dbName, tbName);
lastRowsPrint += 5000000; lastRowsPrint += 5000000;
} }
}
total_sqlstr_len += curr_sqlstr_len; tfree(cmd);
tfree(line);
return 0;
}
if ((count >= g_args.data_batch) static void* dumpInSqlWorkThreadFp(void *arg)
|| (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { {
fprintf(fp, ";\n"); threadInfo *pThread = (threadInfo*)arg;
count = 0; setThreadName("dumpInSqlWorkThrd");
fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
pThread->threadIndex, pThread->count, pThread->from);
for (int64_t i = 0; i < pThread->count; i++) {
char sqlFile[MAX_PATH_LEN];
sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);
FILE* fp = openDumpInFile(sqlFile);
if (NULL == fp) {
errorPrint("[%d] Failed to open input file: %s\n",
pThread->threadIndex, sqlFile);
continue;
} }
if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
sqlFile)) {
okPrint("[%d] Success dump in file: %s\n",
pThread->threadIndex, sqlFile);
}
fclose(fp);
} }
debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); return NULL;
}
fprintf(fp, "\n"); static int dumpInSqlWorkThreads()
atomic_add_fetch_64(&g_totalDumpOutRows, totalRows); {
free(tmpBuffer); int32_t threads = g_args.thread_num;
return 0; uint64_t sqlFileCount = getFilesNum("sql");
} if (0 == sqlFileCount) {
debugPrint("No .sql file found in %s\n", g_args.inpath);
return 0;
}
static int64_t dumpTableData(FILE *fp, char *tbName, createDumpinList("sql", sqlFileCount);
char* dbName, int precision,
char *jsonAvroSchema) { threadInfo *pThread;
int64_t totalRows = 0;
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = (threadInfo *)calloc(
threads, sizeof(threadInfo));
assert(pids);
assert(infos);
int64_t a = sqlFileCount / threads;
if (a < 1) {
threads = sqlFileCount;
a = 1;
}
int64_t b = 0;
if (threads != 0) {
b = sqlFileCount % threads;
}
char sqlstr[1024] = {0}; int64_t from = 0;
int64_t start_time, end_time; for (int32_t t = 0; t < threads; ++t) {
if (strlen(g_args.humanStartTime)) { pThread = infos + t;
if (TSDB_CODE_SUCCESS != taosParseTime( pThread->threadIndex = t;
g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
precision, 0)) { pThread->from = from;
errorPrint("Input %s, time format error!\n", g_args.humanStartTime); pThread->count = t<b?a+1:a;
from += pThread->count;
verbosePrint(
"Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n",
t, pThread->count, pThread->from);
pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
NULL, g_args.port);
if (pThread->taos == NULL) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
free(infos);
free(pids);
return -1; return -1;
} }
} else {
start_time = g_args.start_time;
}
if (strlen(g_args.humanEndTime)) { if (pthread_create(pids + t, NULL,
if (TSDB_CODE_SUCCESS != taosParseTime( dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), errorPrint("%s() LN%d, thread[%d] failed to start\n",
precision, 0)) { __func__, __LINE__, pThread->threadIndex);
errorPrint("Input %s, time format error!\n", g_args.humanEndTime); exit(EXIT_FAILURE);
return -1;
} }
} else {
end_time = g_args.end_time;
} }
sprintf(sqlstr, for (int t = 0; t < threads; ++t) {
"select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", pthread_join(pids[t], NULL);
dbName, tbName, start_time, end_time);
TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbName, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbName);
return -1;
} }
TAOS_RES* res = taos_query(taos, sqlstr); for (int t = 0; t < threads; ++t) {
int32_t code = taos_errno(res); taos_close(infos[t].taos);
if (code != 0) {
errorPrint("failed to run command %s, reason: %s\n",
sqlstr, taos_errstr(res));
taos_free_result(res);
taos_close(taos);
return -1;
} }
free(infos);
free(pids);
if (g_args.avro) { freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
writeSchemaToAvro(jsonAvroSchema);
totalRows = writeResultToAvro(res);
} else {
totalRows = writeResultToSql(res, fp, dbName, tbName);
}
taos_free_result(res); return 0;
taos_close(taos);
return totalRows;
} }
static int checkParam() { static int dumpInDbs()
if (g_args.all_databases && g_args.databases) { {
errorPrint("%s", "conflict option --all-databases and --databases\n"); TAOS *taos = taos_connect(
return -1; g_args.host, g_args.user, g_args.password,
} NULL, g_args.port);
if (g_args.start_time > g_args.end_time) { if (taos == NULL) {
errorPrint("%s", "start time is larger than end time\n"); errorPrint("%s() LN%d, failed to connect to TDengine server\n",
__func__, __LINE__);
return -1; return -1;
} }
if (g_args.arg_list_len == 0) { char dbsSql[MAX_PATH_LEN];
if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) { sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");
errorPrint("%s", "taosdump requires parameters\n");
return -1; FILE *fp = openDumpInFile(dbsSql);
} if (NULL == fp) {
} errorPrint("%s() LN%d, failed to open input file %s\n",
/* __func__, __LINE__, dbsSql);
if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
fprintf(stderr, "duplicate parameter input and output file path\n");
return -1;
}
*/
if (!g_args.isDumpIn && g_args.encode != NULL) {
fprintf(stderr, "invalid option in dump out\n");
return -1; return -1;
} }
debugPrint("Success Open input file: %s\n", dbsSql);
loadFileCharset(fp, g_tsCharset);
if (g_args.table_batch <= 0) { if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
fprintf(stderr, "invalid option in dump out\n"); okPrint("Success dump in file: %s !\n", dbsSql);
return -1;
} }
fclose(fp);
taos_close(taos);
return 0; return 0;
} }
/* static int64_t dumpIn() {
static bool isEmptyCommand(char *cmd) { assert(g_args.isDumpIn);
char *pchar = cmd;
while (*pchar != '\0') { int64_t ret = 0;
if (*pchar != ' ') return false; if (dumpInDbs()) {
pchar++; errorPrint("%s", "Failed to dump dbs in!\n");
} exit(EXIT_FAILURE);
}
return true; ret = dumpInSqlWorkThreads();
#ifdef AVRO_SUPPORT
if (0 == ret) {
ret = dumpInAvroWorkThreads();
}
#endif
return ret;
} }
static void taosReplaceCtrlChar(char *str) { static void *dumpNormalTablesOfStb(void *arg) {
bool ctrlOn = false; threadInfo *pThreadInfo = (threadInfo *)arg;
char *pstr = NULL;
for (pstr = str; *str != '\0'; ++str) { debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
if (ctrlOn) { debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count);
switch (*str) {
case 'n': char command[COMMAND_SIZE];
*pstr = '\n';
pstr++; sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
break; pThreadInfo->dbName, pThreadInfo->stbName,
case 'r': pThreadInfo->count, pThreadInfo->from);
*pstr = '\r';
pstr++; TAOS_RES *res = taos_query(pThreadInfo->taos, command);
break; int32_t code = taos_errno(res);
case 't': if (code) {
*pstr = '\t'; errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
pstr++; __func__, __LINE__, command, taos_errstr(res));
break; taos_free_result(res);
case '\\': return NULL;
*pstr = '\\';
pstr++;
break;
case '\'':
*pstr = '\'';
pstr++;
break;
default:
break;
}
ctrlOn = false;
} else {
if (*str == '\\') {
ctrlOn = true;
} else {
*pstr = *str;
pstr++;
}
} }
}
*pstr = '\0'; FILE *fp = NULL;
} char tmpBuf[MAX_PATH_LEN] = {0};
*/
char *ascii_literal_list[] = { if (g_args.outpath[0] != 0) {
"\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", sprintf(tmpBuf, "%s/%s.%s.%d.sql",
"\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", g_args.outpath,
"\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", pThreadInfo->dbName,
"\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", pThreadInfo->stbName,
"4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", pThreadInfo->threadIndex);
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", } else {
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", sprintf(tmpBuf, "%s.%s.%d.sql",
"[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", pThreadInfo->dbName,
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", pThreadInfo->stbName,
"u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", pThreadInfo->threadIndex);
"\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", }
"\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
"\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
"\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
"\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
"\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
"\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
"\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
"\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
"\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
static int converStringToReadable(char *str, int size, char *buf, int bufsize) { fp = fopen(tmpBuf, "w");
char *pstr = str;
char *pbuf = buf; if (fp == NULL) {
while (size > 0) { errorPrint("%s() LN%d, failed to open file %s\n",
if (*pstr == '\0') break; __func__, __LINE__, tmpBuf);
pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); return NULL;
pstr++;
size--;
} }
*pbuf = '\0';
return 0;
}
static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { TAOS_ROW row = NULL;
char *pstr = str; int64_t i = 0;
char *pbuf = buf; int64_t count;
wchar_t wc; while((row = taos_fetch_row(res)) != NULL) {
while (size > 0) { debugPrint("[%d] sub table %"PRId64": name: %s\n",
if (*pstr == '\0') break; pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
if (byte_width < 0) {
errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
exit(-1);
}
if ((int)wc < 256) { count = dumpNormalTable(
pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); pThreadInfo->taos,
pThreadInfo->dbName,
pThreadInfo->stbName,
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
pThreadInfo->precision,
fp);
if (count < 0) {
break;
} else { } else {
memcpy(pbuf, pstr, byte_width); atomic_add_fetch_64(&g_totalDumpOutRows, count);
pbuf += byte_width;
} }
pstr += byte_width;
} }
*pbuf = '\0'; fclose(fp);
return NULL;
}
static int64_t dumpNtbOfDbByThreads(
SDbInfo *dbInfo,
int64_t ntbCount)
{
if (ntbCount <= 0) {
return 0;
}
int threads = g_args.thread_num;
return 0; int64_t a = ntbCount / threads;
} if (a < 1) {
threads = ntbCount;
a = 1;
}
static void dumpCharset(FILE *fp) { assert(threads);
char charsetline[256]; int64_t b = ntbCount % threads;
(void)fseek(fp, 0, SEEK_SET); threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
sprintf(charsetline, "#!%s\n", tsCharset); pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
(void)fwrite(charsetline, strlen(charsetline), 1, fp); assert(pids);
} assert(infos);
static void loadFileCharset(FILE *fp, char *fcharset) { for (int64_t i = 0; i < threads; i++) {
char * line = NULL; threadInfo *pThreadInfo = infos + i;
size_t line_size = 0; pThreadInfo->taos = taos_connect(
g_args.host,
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
(void)fseek(fp, 0, SEEK_SET); return -1;
ssize_t size = getline(&line, &line_size, fp); }
if (size <= 2) {
goto _exit_no_charset; pThreadInfo->threadIndex = i;
pThreadInfo->count = (i<b)?a+1:a;
pThreadInfo->from = (i==0)?0:
((threadInfo *)(infos + i - 1))->from +
((threadInfo *)(infos + i - 1))->count;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
} }
if (strncmp(line, "#!", 2) != 0) { for (int64_t i = 0; i < threads; i++) {
goto _exit_no_charset; pthread_join(pids[i], NULL);
} }
if (line[size - 1] == '\n') {
line[size - 1] = '\0'; for (int64_t i = 0; i < threads; i++) {
size--; threadInfo *pThreadInfo = infos + i;
taos_close(pThreadInfo->taos);
} }
strcpy(fcharset, line + 2);
tfree(line); free(pids);
return; free(infos);
_exit_no_charset: return 0;
(void)fseek(fp, 0, SEEK_SET);
*fcharset = '\0';
tfree(line);
return;
} }
// ======== dumpIn support multi threads functions ================================// static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
static char **g_tsDumpInSqlFiles = NULL;
static int32_t g_tsSqlFileNum = 0;
static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
static char g_tsCharset[64] = {0};
static int taosGetFilesNum(const char *directoryName,
const char *prefix, const char *prefix2)
{ {
char cmd[1024] = { 0 }; TAOS *taos = taos_connect(g_args.host,
g_args.user, g_args.password, dbInfo->name, g_args.port);
if (NULL == taos) {
errorPrint(
"Failed to connect to TDengine server %s by specified database %s\n",
g_args.host, dbInfo->name);
return 0;
}
if (prefix2) char command[COMMAND_SIZE];
sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ", TAOS_RES *result;
directoryName, prefix, directoryName, prefix2); int32_t code;
else
sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
FILE *fp = popen(cmd, "r"); sprintf(command, "USE %s", dbInfo->name);
if (fp == NULL) { result = taos_query(taos, command);
errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); code = taos_errno(result);
exit(-1); if (code != 0) {
errorPrint("invalid database %s, reason: %s\n",
dbInfo->name, taos_errstr(result));
taos_close(taos);
return 0;
} }
int fileNum = 0; sprintf(command, "SHOW TABLES");
if (fscanf(fp, "%d", &fileNum) != 1) { result = taos_query(taos, command);
errorPrint("failed to execute:%s, parse result error\n", cmd); code = taos_errno(result);
exit(-1); if (code != 0) {
errorPrint("Failed to show %s\'s tables, reason: %s\n",
dbInfo->name, taos_errstr(result));
taos_close(taos);
return 0;
} }
if (fileNum <= 0) { g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
errorPrint("directory:%s is empty\n", directoryName); assert(g_tablesList);
exit(-1);
TAOS_ROW row;
int64_t count = 0;
while(NULL != (row = taos_fetch_row(result))) {
debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
__func__, __LINE__,
count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
tstrncpy(((TableInfo *)(g_tablesList + count))->name,
(char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
if (stbName) {
tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
(char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
((TableInfo *)(g_tablesList + count))->belongStb = true;
}
count ++;
} }
taos_close(taos);
int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
free(g_tablesList);
g_tablesList = NULL;
pclose(fp); return records;
return fileNum;
} }
static void taosParseDirectory(const char *directoryName, static int64_t dumpNtbOfStbByThreads(
const char *prefix, const char *prefix2, SDbInfo *dbInfo, char *stbName)
char **fileArray, int totalFiles)
{ {
char cmd[1024] = { 0 }; int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
if (prefix2) { if (ntbCount <= 0) {
sprintf(cmd, "ls %s/*.%s %s/*.%s | sort", return 0;
directoryName, prefix, directoryName, prefix2);
} else {
sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
} }
FILE *fp = popen(cmd, "r"); int threads = g_args.thread_num;
if (fp == NULL) {
errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
exit(-1);
}
int fileNum = 0; int64_t a = ntbCount / threads;
while (fscanf(fp, "%128s", fileArray[fileNum++])) { if (a < 1) {
if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) { threads = ntbCount;
fileNum--; a = 1;
}
if (fileNum >= totalFiles) {
break;
}
} }
if (fileNum != totalFiles) { assert(threads);
errorPrint("directory:%s changed while read\n", directoryName); int64_t b = ntbCount % threads;
pclose(fp);
exit(-1);
}
pclose(fp); pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
} threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
assert(pids);
assert(infos);
static void taosCheckDatabasesSQLFile(const char *directoryName) for (int64_t i = 0; i < threads; i++) {
{ threadInfo *pThreadInfo = infos + i;
char cmd[1024] = { 0 }; pThreadInfo->taos = taos_connect(
sprintf(cmd, "ls %s/dbs.sql", directoryName); g_args.host,
g_args.user,
g_args.password,
dbInfo->name,
g_args.port
);
if (NULL == pThreadInfo->taos) {
errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
__func__,
__LINE__,
taos_errstr(NULL));
free(pids);
free(infos);
FILE *fp = popen(cmd, "r"); return -1;
if (fp == NULL) { }
errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
exit(-1);
}
while (fscanf(fp, "%128s", g_tsDbSqlFile)) { pThreadInfo->threadIndex = i;
break; pThreadInfo->count = (i<b)?a+1:a;
} pThreadInfo->from = (i==0)?0:
((threadInfo *)(infos + i - 1))->from +
((threadInfo *)(infos + i - 1))->count;
strcpy(pThreadInfo->dbName, dbInfo->name);
pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
pclose(fp); strcpy(pThreadInfo->stbName, stbName);
} pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
}
static void taosMallocDumpFiles() for (int64_t i = 0; i < threads; i++) {
{ pthread_join(pids[i], NULL);
g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
for (int i = 0; i < g_tsSqlFileNum; i++) {
g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
} }
}
static void freeDumpFiles() int64_t records = 0;
{ for (int64_t i = 0; i < threads; i++) {
for (int i = 0; i < g_tsSqlFileNum; i++) { threadInfo *pThreadInfo = infos + i;
tfree(g_tsDumpInSqlFiles[i]); records += pThreadInfo->rowsOfDumpOut;
taos_close(pThreadInfo->taos);
} }
tfree(g_tsDumpInSqlFiles);
free(pids);
free(infos);
return records;
} }
static void taosGetDirectoryFileList(char *inputDir) static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
{ {
struct stat fileStat; dumpCreateDbClause(dbInfo, g_args.with_property, fp);
if (stat(inputDir, &fileStat) < 0) {
errorPrint("%s not exist\n", inputDir);
exit(-1);
}
if (fileStat.st_mode & S_IFDIR) { fprintf(g_fpOfResult, "\n#### database: %s\n",
taosCheckDatabasesSQLFile(inputDir); dbInfo->name);
if (g_args.avro) g_resultStatistics.totalDatabasesOfDumpOut++;
g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
else
g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
int tsSqlFileNumOfTbls = g_tsSqlFileNum; dumpCreateSTableClauseOfDb(dbInfo, fp);
if (g_tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
}
taosMallocDumpFiles();
if (0 != tsSqlFileNumOfTbls) {
if (g_args.avro) {
taosParseDirectory(inputDir, "sql", "avro",
g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
} else {
taosParseDirectory(inputDir, "sql", NULL,
g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
}
}
fprintf(stdout, "\nstart to dispose %d files in %s\n",
g_tsSqlFileNum, inputDir);
} else {
errorPrint("%s is not a directory\n", inputDir);
exit(-1);
}
}
static FILE* taosOpenDumpInFile(char *fptr) { return dumpNTablesOfDb(dbInfo);
wordexp_t full_path; }
if (wordexp(fptr, &full_path, 0) != 0) { static int dumpOut() {
errorPrint("illegal file name: %s\n", fptr); TAOS *taos = NULL;
return NULL; TAOS_RES *result = NULL;
}
char *fname = full_path.we_wordv[0]; TAOS_ROW row;
FILE *fp = NULL;
int32_t count = 0;
FILE *f = NULL; char tmpBuf[MAX_PATH_LEN] = {0};
if ((fname) && (strlen(fname) > 0)) { if (g_args.outpath[0] != 0) {
f = fopen(fname, "r"); sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
if (f == NULL) { } else {
errorPrint("%s() LN%d, failed to open file %s\n", sprintf(tmpBuf, "dbs.sql");
__func__, __LINE__, fname);
}
} }
wordfree(&full_path); fp = fopen(tmpBuf, "w");
return f; if (fp == NULL) {
} errorPrint("%s() LN%d, failed to open file %s\n",
__func__, __LINE__, tmpBuf);
return -1;
}
static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, g_args.dumpDbCount = getDumpDbCount();
char* encode, char* fileName) { debugPrint("%s() LN%d, dump db count: %d\n",
int read_len = 0; __func__, __LINE__, g_args.dumpDbCount);
char * cmd = NULL;
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;
cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); if (0 == g_args.dumpDbCount) {
if (cmd == NULL) { errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
errorPrint("%s() LN%d, failed to allocate memory\n", fclose(fp);
__func__, __LINE__);
return -1; return -1;
} }
int lastRowsPrint = 5000000; g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
int lineNo = 0; if (g_dbInfos == NULL) {
while ((read_len = getline(&line, &line_len, fp)) != -1) { errorPrint("%s() LN%d, failed to allocate memory\n",
++lineNo; __func__, __LINE__);
if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; goto _exit_failure;
line[--read_len] = '\0'; }
//if (read_len == 0 || isCommentLine(line)) { // line starts with # char command[COMMAND_SIZE];
if (read_len == 0 ) {
continue;
}
if (line[read_len - 1] == '\\') { /* Connect to server */
line[read_len - 1] = ' '; taos = taos_connect(g_args.host, g_args.user, g_args.password,
memcpy(cmd + cmd_len, line, read_len); NULL, g_args.port);
cmd_len += read_len; if (taos == NULL) {
continue; errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
} goto _exit_failure;
}
memcpy(cmd + cmd_len, line, read_len); /* --------------------------------- Main Code -------------------------------- */
cmd[read_len + cmd_len]= '\0'; /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
if (queryDbImpl(taos, cmd)) { /* */
errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", dumpCharset(fp);
__func__, __LINE__, lineNo, fileName);
fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); sprintf(command, "show databases");
cmd_len = 0; result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (lineNo >= lastRowsPrint) { if (code != 0) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName); errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
lastRowsPrint += 5000000; __func__, __LINE__, command, taos_errstr(result));
} goto _exit_failure;
} }
tfree(cmd); TAOS_FIELD *fields = taos_fetch_fields(result);
tfree(line);
fclose(fp);
return 0;
}
static void* dumpInWorkThreadFp(void *arg) while ((row = taos_fetch_row(result)) != NULL) {
{ // sys database name : 'log', but subsequent version changed to 'log'
threadInfo *pThread = (threadInfo*)arg; if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
setThreadName("dumpInWorkThrd"); fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
&& (!g_args.allow_sys)) {
continue;
}
for (int32_t f = 0; f < g_tsSqlFileNum; ++f) { if (g_args.databases) { // input multi dbs
if (f % pThread->totalThreads == pThread->threadIndex) { if (inDatabasesSeq(
char *SQLFileName = g_tsDumpInSqlFiles[f]; (char *)row[TSDB_SHOW_DB_NAME_INDEX],
FILE* fp = taosOpenDumpInFile(SQLFileName); fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
if (NULL == fp) {
continue; continue;
} }
fprintf(stderr, ", Success Open input file: %s\n", } else if (!g_args.all_databases) { // only input one db
SQLFileName); if (strncasecmp(g_args.arg_list[0],
dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName); (char *)row[TSDB_SHOW_DB_NAME_INDEX],
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
continue;
} }
}
return NULL; g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
} if (g_dbInfos[count] == NULL) {
errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
static void startDumpInWorkThreads() __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
{ goto _exit_failure;
pthread_attr_t thattr; }
threadInfo *pThread;
int32_t totalThreads = g_args.thread_num;
if (totalThreads > g_tsSqlFileNum) {
totalThreads = g_tsSqlFileNum;
}
threadInfo *threadObj = (threadInfo *)calloc( okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
totalThreads, sizeof(threadInfo)); tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
min(TSDB_DB_NAME_LEN,
fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
if (g_args.with_property) {
g_dbInfos[count]->ntables =
*((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
g_dbInfos[count]->vgroups =
*((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
g_dbInfos[count]->replica =
*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
g_dbInfos[count]->quorum =
*((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
g_dbInfos[count]->days =
*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
if (NULL == threadObj) { tstrncpy(g_dbInfos[count]->keeplist,
errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
} min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
//g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//g_dbInfos[count]->daysToKeep1;
//g_dbInfos[count]->daysToKeep2;
g_dbInfos[count]->cache =
*((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
g_dbInfos[count]->blocks =
*((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
g_dbInfos[count]->minrows =
*((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
g_dbInfos[count]->maxrows =
*((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
g_dbInfos[count]->wallevel =
*((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
g_dbInfos[count]->fsync =
*((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
g_dbInfos[count]->comp =
(int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
g_dbInfos[count]->cachelast =
(int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
for (int32_t t = 0; t < totalThreads; ++t) { tstrncpy(g_dbInfos[count]->precision,
pThread = threadObj + t; (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
pThread->threadIndex = t; DB_PRECISION_LEN);
pThread->totalThreads = totalThreads; g_dbInfos[count]->update =
pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
NULL, g_args.port);
if (pThread->taos == NULL) {
errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
free(threadObj);
return;
} }
pthread_attr_init(&thattr); count++;
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
if (pthread_create(&(pThread->threadID), &thattr, if (g_args.databases) {
dumpInWorkThreadFp, (void*)pThread) != 0) { if (count > g_args.dumpDbCount)
errorPrint("%s() LN%d, thread:%d failed to start\n", break;
__func__, __LINE__, pThread->threadIndex); } else if (!g_args.all_databases) {
exit(0); if (count >= 1)
break;
} }
} }
for (int t = 0; t < totalThreads; ++t) { if (count == 0) {
pthread_join(threadObj[t].threadID, NULL); errorPrint("%d databases valid to dump\n", count);
} goto _exit_failure;
for (int t = 0; t < totalThreads; ++t) {
taos_close(threadObj[t].taos);
} }
free(threadObj);
}
static int dumpIn() {
assert(g_args.isDumpIn);
TAOS *taos = NULL; if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
FILE *fp = NULL; for (int i = 0; i < count; i++) {
int64_t records = 0;
records = dumpWholeDatabase(g_dbInfos[i], fp);
if (records >= 0) {
okPrint("Database %s dumped\n", g_dbInfos[i]->name);
g_totalDumpOutRows += records;
}
}
} else {
if (1 == g_args.arg_list_len) {
int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
if (records >= 0) {
okPrint("Database %s dumped\n", g_dbInfos[0]->name);
g_totalDumpOutRows += records;
}
} else {
dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
}
taos = taos_connect( int superTblCnt = 0 ;
g_args.host, g_args.user, g_args.password, for (int i = 1; g_args.arg_list[i]; i++) {
NULL, g_args.port); TableRecordInfo tableRecordInfo;
if (taos == NULL) {
errorPrint("%s() LN%d, failed to connect to TDengine server\n",
__func__, __LINE__);
return -1;
}
taosGetDirectoryFileList(g_args.inpath); if (getTableRecordInfo(g_dbInfos[0]->name,
g_args.arg_list[i],
&tableRecordInfo) < 0) {
errorPrint("input the invalid table %s\n",
g_args.arg_list[i]);
continue;
}
int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum; int64_t records = 0;
if (g_tsDbSqlFile[0] != 0) { if (tableRecordInfo.isStb) { // dump all table of this stable
tsSqlFileNumOfTbls--; int ret = dumpStableClasuse(
taos,
g_dbInfos[0],
tableRecordInfo.tableRecord.stable,
fp);
if (ret >= 0) {
superTblCnt++;
records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
}
} else if (tableRecordInfo.belongStb){
dumpStableClasuse(
taos,
g_dbInfos[0],
tableRecordInfo.tableRecord.stable,
fp);
records = dumpNormalTableBelongStb(
taos,
g_dbInfos[0],
tableRecordInfo.tableRecord.stable,
g_args.arg_list[i]);
} else {
records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
}
fp = taosOpenDumpInFile(g_tsDbSqlFile); if (records >= 0) {
if (NULL == fp) { okPrint("table: %s dumped\n", g_args.arg_list[i]);
errorPrint("%s() LN%d, failed to open input file %s\n", g_totalDumpOutRows += records;
__func__, __LINE__, g_tsDbSqlFile); }
return -1;
} }
fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
loadFileCharset(fp, g_tsCharset);
dumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
g_tsDbSqlFile);
} }
taos_close(taos); taos_close(taos);
if (0 != tsSqlFileNumOfTbls) { /* Close the handle and return */
startDumpInWorkThreads(); fclose(fp);
} taos_free_result(result);
freeDbInfos();
freeDumpFiles(); fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
return 0; return 0;
_exit_failure:
fclose(fp);
taos_close(taos);
taos_free_result(result);
freeDbInfos();
errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
return -1;
} }
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
...@@ -2988,7 +3910,10 @@ int main(int argc, char *argv[]) { ...@@ -2988,7 +3910,10 @@ int main(int argc, char *argv[]) {
printf("databasesSeq: %s\n", g_args.databasesSeq); printf("databasesSeq: %s\n", g_args.databasesSeq);
printf("schemaonly: %s\n", g_args.schemaonly?"true":"false"); printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
printf("with_property: %s\n", g_args.with_property?"true":"false"); printf("with_property: %s\n", g_args.with_property?"true":"false");
#ifdef AVRO_SUPPORT
printf("avro format: %s\n", g_args.avro?"true":"false"); printf("avro format: %s\n", g_args.avro?"true":"false");
printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
#endif
printf("start_time: %" PRId64 "\n", g_args.start_time); printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("human readable start time: %s \n", g_args.humanStartTime); printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time); printf("end_time: %" PRId64 "\n", g_args.end_time);
...@@ -3042,7 +3967,10 @@ int main(int argc, char *argv[]) { ...@@ -3042,7 +3967,10 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq); fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false"); fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
#ifdef AVRO_SUPPORT
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
#endif
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
...@@ -3072,6 +4000,7 @@ int main(int argc, char *argv[]) { ...@@ -3072,6 +4000,7 @@ int main(int argc, char *argv[]) {
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (dumpIn() < 0) { if (dumpIn() < 0) {
errorPrint("%s\n", "dumpIn() failed!");
ret = -1; ret = -1;
} }
} else { } else {
...@@ -3103,4 +4032,3 @@ int main(int argc, char *argv[]) { ...@@ -3103,4 +4032,3 @@ int main(int argc, char *argv[]) {
return ret; return ret;
} }
CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine) PROJECT(TDengine)
if(NOT WIN32)
string(ASCII 27 Esc)
set(ColourReset "${Esc}[m")
set(ColourBold "${Esc}[1m")
set(Red "${Esc}[31m")
set(Green "${Esc}[32m")
set(Yellow "${Esc}[33m")
set(Blue "${Esc}[34m")
set(Magenta "${Esc}[35m")
set(Cyan "${Esc}[36m")
set(White "${Esc}[37m")
set(BoldRed "${Esc}[1;31m")
set(BoldGreen "${Esc}[1;32m")
set(BoldYellow "${Esc}[1;33m")
set(BoldBlue "${Esc}[1;34m")
set(BoldMagenta "${Esc}[1;35m")
set(BoldCyan "${Esc}[1;36m")
set(BoldWhite "${Esc}[1;37m")
endif()
ADD_SUBDIRECTORY(monitor) ADD_SUBDIRECTORY(monitor)
IF (TD_BUILD_HTTP) IF (TD_BUILD_HTTP)
...@@ -57,8 +37,15 @@ ELSE () ...@@ -57,8 +37,15 @@ ELSE ()
DEPENDS taos DEPENDS taos
BUILD_IN_SOURCE 1 BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config" CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config"
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}" BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}"
INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/ INSTALL_COMMAND
COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 && ./upx blm3
COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./blm3.service ${CMAKE_BINARY_DIR}/test/cfg/
) )
ENDIF () ENDIF ()
......
Subproject commit ba539ce69dc4fe53536e9b0517fe75917dce5c46 Subproject commit 598cb96ee60ec6a16c5b8b07ea8ca9748799e7e1
...@@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) { ...@@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) {
TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext; TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext;
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext, HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext,
sizeof(TSDB_CACHE_PTR_TYPE), 3000); sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive);
pContext->ppContext = ppContext; pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
......
...@@ -402,7 +402,7 @@ typedef struct SQInfo { ...@@ -402,7 +402,7 @@ typedef struct SQInfo {
int32_t dataReady; // denote if query result is ready or not int32_t dataReady; // denote if query result is ready or not
void* rspContext; // response context void* rspContext; // response context
int64_t startExecTs; // start to exec timestamp int64_t startExecTs; // start to exec timestamp
int64_t lastRetrieveTs; // last retrieve timestamp int64_t lastRetrieveTs; // last retrieve timestamp
char* sql; // query sql string char* sql; // query sql string
SQueryCostInfo summary; SQueryCostInfo summary;
} SQInfo; } SQInfo;
......
...@@ -4188,31 +4188,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data ...@@ -4188,31 +4188,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
compSizes = tcalloc(numOfCols, sizeof(int32_t)); compSizes = tcalloc(numOfCols, sizeof(int32_t));
} }
if (pQueryAttr->pExpr2 == NULL) { for (int32_t col = 0; col < numOfCols; ++col) {
for (int32_t col = 0; col < numOfCols; ++col) { SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); if (compressed) {
if (compressed) { compSizes[col] = compressQueryColData(pColRes, numOfRows, data, compressed);
compSizes[col] = compressQueryColData(pColRes, pRes->info.rows, data, compressed); data += compSizes[col];
data += compSizes[col]; *compLen += compSizes[col];
*compLen += compSizes[col]; compSizes[col] = htonl(compSizes[col]);
compSizes[col] = htonl(compSizes[col]); } else {
} else { memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows); data += pColRes->info.bytes * numOfRows;
data += pColRes->info.bytes * pRes->info.rows;
}
}
} else {
for (int32_t col = 0; col < numOfCols; ++col) {
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
if (compressed) {
compSizes[col] = htonl(compressQueryColData(pColRes, numOfRows, data, compressed));
data += compSizes[col];
*compLen += compSizes[col];
compSizes[col] = htonl(compSizes[col]);
} else {
memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
data += pColRes->info.bytes * numOfRows;
}
} }
} }
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
extern "C" { extern "C" {
#endif #endif
#define TSDB_CFG_MAX_NUM 128 #define TSDB_CFG_MAX_NUM 130
#define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41 #define TSDB_CFG_VALUE_LEN 41
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
<dependency> <dependency>
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version> <version>2.0.34</version>
</dependency> </dependency>
<!-- druid --> <!-- druid -->
<dependency> <dependency>
......
...@@ -10,4 +10,4 @@ ...@@ -10,4 +10,4 @@
| 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces | | 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces |
more detail: https://www.taosdata.com/cn//documentation20/connector-java/ more detail: https://www.taosdata.com/cn/documentation20/connector/java
\ No newline at end of file
...@@ -27,7 +27,7 @@ python3 ./test.py -f insert/bug3654.py ...@@ -27,7 +27,7 @@ python3 ./test.py -f insert/bug3654.py
python3 ./test.py -f insert/insertDynamicColBeforeVal.py python3 ./test.py -f insert/insertDynamicColBeforeVal.py
python3 ./test.py -f insert/in_function.py python3 ./test.py -f insert/in_function.py
python3 ./test.py -f insert/modify_column.py python3 ./test.py -f insert/modify_column.py
python3 ./test.py -f insert/line_insert.py #python3 ./test.py -f insert/line_insert.py
python3 ./test.py -f insert/specialSql.py python3 ./test.py -f insert/specialSql.py
# timezone # timezone
...@@ -416,9 +416,9 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py ...@@ -416,9 +416,9 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py
python3 ./test.py -f query/queryRegex.py python3 ./test.py -f query/queryRegex.py
python3 ./test.py -f tools/taosdemoTestdatatype.py python3 ./test.py -f tools/taosdemoTestdatatype.py
python3 ./test.py -f insert/schemalessInsert.py #python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py #python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py
python3 ./test.py -f insert/openTsdbJsonInsert.py #python3 ./test.py -f insert/openTsdbJsonInsert.py
#======================p4-end=============== #======================p4-end===============
......
...@@ -31,9 +31,8 @@ class TDTestCase: ...@@ -31,9 +31,8 @@ class TDTestCase:
tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))') tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))') tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
tdLog.info('=============== step2,create table增加了转义字符') tdLog.info('=============== step2,create table with escape character')
tdLog.info('create table tb1 using stb1 tags("abc\\"def")') tdLog.info('create table tb1 using stb1 tags("abc\\"def")')
#增加了转义字符\
tdSql.execute('create table tb1 using stb1 tags("abc\\"def")') tdSql.execute('create table tb1 using stb1 tags("abc\\"def")')
tdLog.info('=============== step3,insert data') tdLog.info('=============== step3,insert data')
......
...@@ -233,5 +233,5 @@ void shellParseArgument(int argc, char *argv[]) { ...@@ -233,5 +233,5 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
pPrint("%s replica:%d %s", GREEN, replica, NC); pPrint("%s replica:%d %s", GREEN, replica, NC);
pPrint("%s start create table performace test %s", GREEN, NC); pPrint("%s start create table performance test %s", GREEN, NC);
} }
...@@ -221,5 +221,5 @@ void shellParseArgument(int argc, char *argv[]) { ...@@ -221,5 +221,5 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC); pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
pPrint("%s replica:%d %s", GREEN, replica, NC); pPrint("%s replica:%d %s", GREEN, replica, NC);
pPrint("%s start create table performace test %s", GREEN, NC); pPrint("%s start create table performance test %s", GREEN, NC);
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册