Commit d44ac61d, authored by S shenglian zhou

Merge remote-tracking branch 'origin/develop' into szhou/hotfix/order-by-tbname

@@ -107,7 +107,77 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/
'''
return 1
}
def pre_test_noinstall(){
sh'hostname'
sh'''
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WKC}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WKC}
git checkout 2.0
'''
}
else{
sh '''
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WK}
git checkout 2.0
'''
}
else{
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
export TZ=Asia/Harbin
date
git clean -dfx
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
'''
return 1
}

@@ -201,8 +271,8 @@ pipeline {

stage('pre_build'){
agent{label 'master'}
options { skipDefaultCheckout() }
when {
changeRequest()
}
steps {
script{
@@ -322,21 +392,9 @@ pipeline {

'''
sh '''
cd ${WKC}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs > /dev/null 2>&1
echo '' |./taosdemo -c /etc/taos
'''
sh '''
cd ${WKC}/tests/gotest
@@ -469,7 +527,61 @@ pipeline {

}
}
}
stage('arm64centos7') {
agent{label " arm64centos7 "}
steps {
pre_test_noinstall()
}
}
stage('arm64centos8') {
agent{label " arm64centos8 "}
steps {
pre_test_noinstall()
}
}
stage('arm32bionic') {
agent{label " arm32bionic "}
steps {
pre_test_noinstall()
}
}
stage('arm64bionic') {
agent{label " arm64bionic "}
steps {
pre_test_noinstall()
}
}
stage('arm64focal') {
agent{label " arm64focal "}
steps {
pre_test_noinstall()
}
}
stage('centos7') {
agent{label " centos7 "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:trusty') {
agent{label " trusty "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:xenial') {
agent{label " xenial "}
steps {
pre_test_noinstall()
}
}
stage('ubuntu:bionic') {
agent{label " bionic "}
steps {
pre_test_noinstall()
}
}
stage('build'){
agent{label " wintest "}
steps {
......
IF (TD_LINUX)
  SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
  INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
  INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
  IF (TD_POWER)
    SET(CMAKE_INSTALL_PREFIX C:/PowerDB)

@@ -41,6 +40,5 @@ ELSEIF (TD_WINDOWS)

ELSEIF (TD_DARWIN)
  SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
  INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
  INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin ${TD_VER_NUMBER})")
ENDIF ()
@@ -3,7 +3,7 @@

## <a class="anchor" id="grafana"></a>Grafana

TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process requires no code development, and the contents of TDengine data tables can be visualized on dashboards. See [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md) for more on using the TDengine plugin.

### Install Grafana

@@ -11,19 +11,24 @@

### Configure Grafana

Download the TDengine Grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.

```bash
GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```

Taking CentOS 7.2 as an example, extract the plugin package into the /var/lib/grafana/plugins directory and restart Grafana:

```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```

Grafana 7.3+ / 8.x checks plugin signatures, so the following lines must also be added to grafana.ini before the plugin can be used:

```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
### Use Grafana

@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource

* ALIAS BY: sets an alias for the current query.
* GENERATE SQL: clicking this button substitutes the variables and generates the statement that will actually be executed.

Following the default prompts to query the average system memory usage over the specified interval on the server where TDengine is deployed looks like this:

![img](../images/connections/create_dashboard2.jpg)

@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource

#### Import Dashboard

We provide a TDengine Dashboard that can serve as a monitoring visualization tool for a TDengine cluster; see [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).

Click the `Import` button on the left panel, select **Grafana.com Dashboard**, enter the id `15146`, and load it:

![img](../images/connections/import_dashboard1.jpg)

After the import completes, the dashboard looks like this:

![img](../images/connections/dashboard-15146.png)
## <a class="anchor" id="matlab"></a>MATLAB
......
@@ -30,12 +30,14 @@

## Data Pipeline Setup

### Download the TDengine plugin to the Grafana plugin directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
```

### Modify /etc/telegraf/telegraf.conf

@@ -61,7 +63,7 @@ sudo systemctl start telegraf

Use a web browser to visit IP:3000 and log in to the Grafana UI; the initial username/password is admin/admin.
Click the gear icon on the left and select Plugins; you should find the TDengine data source plugin icon.
Click the plus icon on the left and select Import, download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json, and import it. The dashboard then looks like this:

![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)
......
@@ -30,11 +30,13 @@

## Data Pipeline Setup

### Download the TDengine plugin to the Grafana plugin directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
```

### Configure collectd

@@ -62,13 +64,13 @@ repeater section: add { host:'<TDengine server/cluster host>', port: <port for StatsD> }

#### Import the collectd dashboard

Download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json, click the plus icon on the left, select Import, and follow the prompts to import the JSON file. The dashboard then looks like this:

![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png)

#### Import the StatsD dashboard

Download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json, click the plus icon on the left, select Import, and follow the prompts to import the JSON file. The dashboard then looks like this:

![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png)

## Summary
......
@@ -12,12 +12,17 @@ https://grafana.com/grafana/download.

### Configure Grafana

Download the Grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.

```bash
GF_VERSION=3.1.1
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```

Taking CentOS 7.2 as an example, extract the plugin package into the /var/lib/grafana/plugins directory and restart Grafana:

```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```

### Use Grafana

@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the specified interval

#### Import Dashboard

We provide an example dashboard: [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).

Click the `Import` button on the left panel and load the Grafana dashboard id `15146`:

![img](../images/connections/import_dashboard1.jpg)

After the dashboard is imported, it looks like this:

![img](../images/connections/dashboard-15146.png)

## <a class="anchor" id="matlab"></a> MATLAB
......
@@ -68,12 +68,6 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat

cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
else
echo "grafanaplugin bundled directory not found!"
exit 1
fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
......
@@ -213,7 +213,7 @@ else

exit 1
fi

make -j8

cd ${curr_dir}
......
@@ -73,12 +73,6 @@ cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin

cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
else
echo grafanaplugin bundled directory not found!
exit 1
fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
......
@@ -381,11 +381,6 @@ function install_data() {

}

function install_connector() {
if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else
......
@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then

if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then

if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then

if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then

if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector"

mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector

if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo

#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
# else
# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
# fi
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector
# else
......
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector

if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
else
echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
......
@@ -155,6 +155,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);

bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscGroupbyTag(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
......
@@ -239,6 +239,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp
  (JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong);

/*
 * Class:     com_taosdata_jdbc_TSDBJNIConnector
 * Method:    insertLinesImp
 * Signature: ([Ljava/lang/String;JII)I
 */
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp
  (JNIEnv *, jobject, jobjectArray, jlong, jint, jint);

#ifdef __cplusplus
}
#endif
......
@@ -988,7 +988,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI

  return JNI_SUCCESS;
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
                                                                              jobjectArray lines, jlong conn,
                                                                              jint protocol, jint precision) {
  TAOS *taos = (TAOS *)conn;

@@ -1019,9 +1019,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J

  tfree(c_lines);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, taos, tstrerror(code), taos_errstr(result));
    taos_free_result((void *)result);
    return JNI_TDENGINE_ERROR;
  }
  taos_free_result((void *)result);

  return JNI_SUCCESS;
}
@@ -762,35 +762,32 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {

  if (!dataBuf->ordered) {
    char *pBlockData = pBlocks->data;
    qsort(pBlockData, pBlocks->numOfRows, dataBuf->rowSize, rowDataCompar);
    dataBuf->ordered = true;

    if(tsClientMerge) {
      int32_t i = 0;
      int32_t j = 1;
      while (j < pBlocks->numOfRows) {
        TSKEY ti = *(TSKEY *)(pBlockData + dataBuf->rowSize * i);
        TSKEY tj = *(TSKEY *)(pBlockData + dataBuf->rowSize * j);
        if (ti == tj) {
          if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
            memmove(pBlockData + dataBuf->rowSize * i, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
          }
          ++j;
          continue;
        }
        int32_t nextPos = (++i);
        if (nextPos != j) {
          memmove(pBlockData + dataBuf->rowSize * nextPos, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
        }
        ++j;
      }
      pBlocks->numOfRows = i + 1;
      dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
    }
  }

  dataBuf->prevTS = INT64_MIN;
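The rewrite above keeps the sort unconditional but gates the duplicate-timestamp merge behind the new `tsClientMerge` switch; the same pattern recurs in `tscSortRemoveDataBlockDupRows` below. The merge itself is a standard two-cursor, in-place compaction of a sorted array. A self-contained sketch of the technique, with a hypothetical `Row` type standing in for the real TDengine row layout:

```c
#include <stdint.h>

typedef int64_t TSKEY;
typedef struct { TSKEY ts; char payload[8]; } Row;  /* hypothetical row layout */

/* rows[] must already be sorted by ts. Keeps the LAST row seen for each
 * duplicated timestamp (an "update wins" policy) and returns the new count. */
static int dedupSortedRows(Row *rows, int n) {
  if (n <= 1) return n;
  int i = 0;                     /* write cursor: last unique slot */
  for (int j = 1; j < n; ++j) {  /* read cursor */
    if (rows[j].ts == rows[i].ts) {
      rows[i] = rows[j];         /* duplicate key: newer row overwrites older */
    } else if (++i != j) {
      rows[i] = rows[j];         /* new key: compact it down to slot i */
    }
  }
  return i + 1;
}
```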
@@ -836,32 +833,33 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk

  if (!dataBuf->ordered) {
    pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
    qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
    dataBuf->ordered = true;

    if(tsClientMerge) {
      pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
      int32_t i = 0;
      int32_t j = 1;
      while (j < nRows) {
        TSKEY ti = (pBlkKeyTuple + i)->skey;
        TSKEY tj = (pBlkKeyTuple + j)->skey;
        if (ti == tj) {
          if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
            memmove(pBlkKeyTuple + i, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
          }
          ++j;
          continue;
        }
        int32_t nextPos = (++i);
        if (nextPos != j) {
          memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
        }
        ++j;
      }
      pBlocks->numOfRows = i + 1;
    }
  }

  dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
......
@@ -20,7 +20,7 @@

#include "tscParseLine.h"

typedef struct {
  char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
  SHashObj* tagHash;
  SHashObj* fieldHash;
  SArray* tags; //SArray<SSchema>

@@ -64,13 +64,13 @@ typedef enum {

} ESchemaAction;

typedef struct {
  char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
  SArray* tags; //SArray<SSchema>
  SArray* fields; //SArray<SSchema>
} SCreateSTableActionInfo;

typedef struct {
  char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
  SSchema* field;
} SAlterSTableActionInfo;

@@ -155,13 +155,13 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa

  qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);

  SStringBuilder sb; memset(&sb, 0, sizeof(sb));
  char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
  strtolower(sTableName, point->stableName);
  taosStringBuilderAppendString(&sb, sTableName);
  for (int j = 0; j < point->tagNum; ++j) {
    taosStringBuilderAppendChar(&sb, ',');
    TAOS_SML_KV* tagKv = point->tags + j;
    char tagName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
    strtolower(tagName, tagKv->key);
    taosStringBuilderAppendString(&sb, tagName);
    taosStringBuilderAppendChar(&sb, '=');
@@ -214,8 +214,8 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,

    for (int j = 0; j < point->tagNum; ++j) {
      TAOS_SML_KV* tagKv = point->tags + j;
      if (!point->childTableName) {
        char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
        int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE;
        getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
        point->childTableName = calloc(1, tableNameLen+1);
        strncpy(point->childTableName, childTableName, tableNameLen);

@@ -261,7 +261,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,

static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[],
                                    SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) {
  char fieldNameLowerCase[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
  strtolower(fieldNameLowerCase, pointColField->name);

  size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, strlen(fieldNameLowerCase));

@@ -281,7 +281,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash

        action->action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
      }
      memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
      memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
      action->alterSTable.field = pointColField;
      *actionNeeded = true;
    }
@@ -292,7 +292,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash

        action->action = SCHEMA_ACTION_ADD_COLUMN;
      }
      memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
      memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
      action->alterSTable.field = pointColField;
      *actionNeeded = true;
    }

@@ -536,7 +536,7 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl

  tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName);

  char tableNameLowerCase[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
  strtolower(tableNameLowerCase, tableName);

  char sql[256];

@@ -623,7 +623,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*

      SSchemaAction schemaAction = {0};
      schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
      memset(&schemaAction.createSTable, 0, sizeof(SCreateSTableActionInfo));
      memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
      schemaAction.createSTable.tags = pointSchema->tags;
      schemaAction.createSTable.fields = pointSchema->fields;
      applySchemaAction(taos, &schemaAction, info);
@@ -631,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*

    if (code != 0) {
      tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
      return code;
    }
  }

  if (code == TSDB_CODE_SUCCESS) {
    pointSchema->precision = dbSchema.precision;

    size_t pointTagSize = taosArrayGetSize(pointSchema->tags);
    size_t pointFieldSize = taosArrayGetSize(pointSchema->fields);

@@ -659,7 +659,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*

    SSchema* pointColTs = taosArrayGet(pointSchema->fields, 0);
    SSchema* dbColTs = taosArrayGet(dbSchema.fields, 0);
    memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE);

    for (int j = 1; j < pointFieldSize; ++j) {
      SSchema* pointCol = taosArrayGet(pointSchema->fields, j);
......
@@ -2454,6 +2454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col

  const char* msg12 = "parameter is out of range [1, 100]";
  const char* msg13 = "parameter list required";
  const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
  const char* msg15 = "parameter is out of range [1, 1000]";

  switch (functionId) {
    case TSDB_FUNC_COUNT: {
@@ -2901,11 +2902,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col

        }
      }
    } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
      if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) {
        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
      }

      tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);

      int64_t numRowsSelected = GET_INT64_VAL(val);
      if (numRowsSelected <= 0 || numRowsSelected > 1000) {
        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
      }

      // todo REFACTOR
@@ -5022,6 +5027,7 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr

  const char* msg1 = "super table join requires tags column";
  const char* msg2 = "timestamp join condition missing";
  const char* msg3 = "condition missing for join query";
  const char* msg4 = "only ts column join allowed";

  if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
    if (pQueryInfo->numOfTables == 1) {

@@ -5039,6 +5045,8 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr

    if (pCondExpr->pJoinExpr == NULL) {
      return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
    }
  } else if ((!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) && pCondExpr->pJoinExpr) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
  }

  if (!pCondExpr->tsJoin) {
@@ -5593,10 +5601,14 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo

  const char* msg4 = "illegal value or data overflow";
  const char* msg5 = "fill only available for interval query";
  const char* msg6 = "not supported function now";
  const char* msg7 = "join query not supported fill operation";

  if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
  }

  if(QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
    return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
  }

  /*
   * fill options are set at the end position, when all columns are set properly

@@ -5955,6 +5967,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq

        return invalidOperationMsg(pMsgBuf, msg11);
      }

      if (udf) {
        return invalidOperationMsg(pMsgBuf, msg11);
      }

      tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
      pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
      pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
@@ -6372,6 +6388,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {

    SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
    SStrToken    name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};

    // handle the escape character: backtick
    if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
      memmove(name.z, name.z + 1, name.n);
      name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
      name.n -= TS_ESCAPE_CHAR_SIZE;
    }

    if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
      return invalidOperationMsg(pMsg, msg17);
    }
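The added block strips one surrounding pair of escape characters (backticks) from the name token before the column lookup. The same normalization as a standalone sketch; `ESCAPE_CHAR` is an assumed stand-in for the actual `TS_ESCAPE_CHAR` value:

```c
#include <string.h>

#define ESCAPE_CHAR '`'  /* assumed value of TS_ESCAPE_CHAR */

/* Removes a surrounding pair of escape characters in place and returns the
 * new length; the buffer must hold at least len + 1 bytes for the NUL. */
static size_t stripEscape(char *name, size_t len) {
  if (len >= 2 && name[0] == ESCAPE_CHAR && name[len - 1] == ESCAPE_CHAR) {
    memmove(name, name + 1, len - 2);  /* shift the inner bytes left */
    len -= 2;
    name[len] = '\0';
  }
  return len;
}
```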
@@ -9210,6 +9232,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf

    pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
    pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
    pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
    pQueryInfo->groupbyTag = tscGroupbyTag(pQueryInfo);
    pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
    pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
......
@@ -752,7 +752,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *,

void taos_close_stream(TAOS_STREAM *handle) {
  SSqlStream *pStream = (SSqlStream *)handle;

  SSqlObj *pSql = pStream->pSql;
  if (pSql == NULL) {
    return;
  }

@@ -763,13 +763,13 @@ void taos_close_stream(TAOS_STREAM *handle) {

   */
  if (pSql->signature == pSql) {
    tscRemoveFromStreamList(pStream, pSql);
    pStream->pSql = NULL;
    taosTmrStopA(&(pStream->pTimer));

    tscDebug("0x%"PRIx64" stream:%p is closed", pSql->self, pStream);
    // notify CQ to release the pStream object
    pStream->fp(pStream->param, NULL, NULL);

    taos_free_result(pSql);
    tfree(pStream);
......
@@ -2876,7 +2876,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO

  SSqlObj *userSql = pParentSql->rootObj;

  if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
    if (userSql != pParentSql && pParentSql->freeParam != NULL) {
      (*pParentSql->freeParam)(&pParentSql->param);
    }

@@ -3729,6 +3729,25 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {

  return hasData;
}
void tscSetQuerySort(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr) {
if (pQueryInfo->interval.interval <= 0) {
return;
}
if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
size_t size = taosArrayGetSize(pQueryInfo->pUpstream);
for(int32_t i = 0; i < size; ++i) {
SQueryInfo* pq = taosArrayGetP(pQueryInfo->pUpstream, i);
if (pq->groupbyTag && pq->interval.interval > 0) {
pQueryAttr->needSort = true;
return;
}
}
}
}
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
                               char* sql, void* merger, int32_t stage, uint64_t qId) {
  assert(pQueryInfo != NULL);

@@ -3831,6 +3850,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr

  SArray* pa = NULL;
  if (stage == MASTER_SCAN) {
    pQueryAttr->createFilterOperator = false;  // no need for parent query
    tscSetQuerySort(pQueryInfo, pQueryAttr);
    pa = createExecOperatorPlan(pQueryAttr);
  } else {
    pa = createGlobalMergePlan(pQueryAttr);
......
@@ -414,6 +414,19 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {

  return false;
}
bool tscGroupbyTag(SQueryInfo* pQueryInfo) {
SGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
if (TSDB_COL_IS_TAG(pIndex->flag)) { // group by tag
return true;
}
}
return false;
}
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
  size_t numOfExprs = tscNumOfExprs(pQueryInfo);

@@ -1256,6 +1269,7 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t

}
*/

void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
  SSqlRes* pOutput = &pSql->res;
......
@@ -339,7 +339,9 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }

int tdAllocMemForCol(SDataCol *pCol, int maxPoints);

void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset);
void dataColSetOffset(SDataCol *pCol, int nEle);

bool isNEleNull(SDataCol *pCol, int nEle);

@@ -670,7 +672,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) {

#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))

SMemRow tdMemRowDup(SMemRow row);
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset);

// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
......
@@ -217,6 +217,8 @@ extern int32_t wDebugFlag;

extern int32_t cqDebugFlag;
extern int32_t debugFlag;

extern int8_t tsClientMerge;

#ifdef TD_TSZ
// lossy
extern char lossyColumns[];

@@ -232,6 +234,7 @@ extern int8_t tsDeadLockKillQuery;

// schemaless
extern char tsDefaultJSONStrType[];

typedef struct {
  char dir[TSDB_FILENAME_LEN];
  int  level;
......
@@ -239,9 +239,12 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {

  pDataCol->len = 0;
}

/**
 * value from timestamp should be TKEY here instead of TSKEY.
 * - rowOffset: 0 for current row, -1 for previous row
 */
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset) {
  ASSERT(pCol != NULL && value != NULL && (rowOffset == 0 || rowOffset == -1));

  if (isAllRowsNull(pCol)) {
    if (isNull(value, pCol->type)) {

@@ -257,16 +260,29 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo

  }

  if (IS_VAR_DATA_TYPE(pCol->type)) {
    if (rowOffset == 0) {
      // set offset
      pCol->dataOff[numOfRows] = pCol->len;
      // Copy data
      memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
      // Update the length
      pCol->len += varDataTLen(value);
    } else {
      // Copy data
      void *lastValue = POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows]);
      int lastValLen = varDataTLen(lastValue);
      memcpy(lastValue, value, varDataTLen(value));
      // Update the length
      pCol->len -= lastValLen;
      pCol->len += varDataTLen(value);
    }
  } else {
    // for a partial update, overwrite the last row's value; pCol->len only grows on a true append (rowOffset == 0)
    ASSERT(pCol->len == (TYPE_BYTES[pCol->type] * (numOfRows - rowOffset)));
    memcpy(POINTER_SHIFT(pCol->pData, (pCol->len + rowOffset * TYPE_BYTES[pCol->type])), value, pCol->bytes);
    if (rowOffset == 0) {
      pCol->len += pCol->bytes;
    }
  }
  return 0;
}
@@ -441,7 +457,8 @@ void tdResetDataCols(SDataCols *pCols) {
   }
 }

-static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull,
+                                     int rowOffset) {
   ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));

   int rcol = 0;
@@ -451,7 +468,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
     bool setCol = 0;
     SDataCol *pDataCol = &(pCols->cols[dcol]);
     if (rcol >= schemaNCols(pSchema)) {
-      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
       dcol++;
       continue;
     }
@@ -460,14 +477,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
     if (pRowCol->colId == pDataCol->colId) {
       void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
       if(!isNull(value, pDataCol->type)) setCol = 1;
-      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
       dcol++;
       rcol++;
     } else if (pRowCol->colId < pDataCol->colId) {
       rcol++;
     } else {
       if(forceSetNull || setCol) {
-        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
       }
       dcol++;
     }
@@ -475,7 +492,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
   pCols->numOfRows++;
 }

-static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
   ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));

   int rcol = 0;
@@ -487,7 +504,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
     bool setCol = 0;
     SDataCol *pDataCol = &(pCols->cols[dcol]);
     if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
-      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+      dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
       ++dcol;
       continue;
     }
@@ -497,14 +514,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
     if (colIdx->colId == pDataCol->colId) {
       void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
       if(!isNull(value, pDataCol->type)) setCol = 1;
-      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
       ++dcol;
       ++rcol;
     } else if (colIdx->colId < pDataCol->colId) {
       ++rcol;
     } else {
       if(forceSetNull || setCol) {
-        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+        dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
       }
       ++dcol;
     }
@@ -512,11 +529,11 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
   pCols->numOfRows++;
 }

-void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
   if (isDataRow(row)) {
-    tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
+    tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull, rowOffset);
   } else if (isKvRow(row)) {
-    tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
+    tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull, rowOffset);
   } else {
     ASSERT(0);
   }
@@ -539,7 +556,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
       for (int j = 0; j < source->numOfCols; j++) {
         if (source->cols[j].len > 0 || target->cols[j].len > 0) {
           dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
-                           target->maxPoints);
+                           target->maxPoints, 0);
         }
       }
       target->numOfRows++;
@@ -583,7 +600,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
       ASSERT(target->cols[i].type == src1->cols[i].type);
       if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
         dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
-                         target->maxPoints);
+                         target->maxPoints, 0);
       }
     }
@@ -595,10 +612,10 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
       ASSERT(target->cols[i].type == src2->cols[i].type);
       if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
         dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
-                         target->maxPoints);
+                         target->maxPoints, 0);
       } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
        dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
-                         target->maxPoints);
+                         target->maxPoints, 0);
       } else if(target->cols[i].len > 0) {
         dataColSetNullAt(&target->cols[i], target->numOfRows);
       }
...
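[Editor's note] The rowOffset contract above reads: with 0 the value fills a new row slot and len advances; with -1 the value overwrites the slot of the row written last and len stays put, which is what the client-merge partial update relies on. A minimal standalone model of the fixed-width branch (MiniCol/miniAppend are illustrative stand-ins, not the real SDataCol API):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a fixed-width SDataCol: a byte buffer plus a length. */
typedef struct { char data[64]; int len; int bytes; } MiniCol;

/* Mirrors the fixed-width branch of dataColAppendVal: rowOffset is 0 (current row)
 * or -1 (rewrite the previously written row in place). */
static void miniAppend(MiniCol *col, const void *value, int numOfRows, int rowOffset) {
  assert(rowOffset == 0 || rowOffset == -1);
  assert(col->len == col->bytes * (numOfRows - rowOffset));
  memcpy(col->data + col->len + rowOffset * col->bytes, value, col->bytes);
  if (rowOffset == 0) col->len += col->bytes;  /* only a new row advances len */
}

int main(void) {
  MiniCol col = {.len = 0, .bytes = sizeof(int32_t)};
  int32_t v = 1, w = 2;
  miniAppend(&col, &v, 0, 0);   /* append row 0: len becomes 4 */
  miniAppend(&col, &w, 0, -1);  /* partial update of row 0: len stays 4 */
  printf("len=%d value=%d\n", col.len, *(int32_t *)col.data);  /* len=4 value=2 */
  return 0;
}
```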
@@ -268,6 +268,8 @@ int32_t tsdbDebugFlag = 131;
 int32_t cqDebugFlag = 131;
 int32_t fsDebugFlag = 135;
+
+int8_t tsClientMerge = 0;
 #ifdef TD_TSZ
 //
 // lossy compress 6
@@ -1642,6 +1644,16 @@ static void doInitGlobalConfig(void) {
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

+  cfg.option = "clientMerge";
+  cfg.ptr = &tsClientMerge;
+  cfg.valType = TAOS_CFG_VTYPE_INT8;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+  cfg.minValue = 0;
+  cfg.maxValue = 1;
+  cfg.ptrLength = 1;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
+
   // default JSON string type option "binary"/"nchar"
   cfg.option = "defaultJSONStrType";
   cfg.ptr = tsDefaultJSONStrType;
@@ -1715,9 +1727,9 @@ static void doInitGlobalConfig(void) {
   cfg.ptrLength = 0;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

-  assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM);
+  assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
 #else
-  assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5);
+  assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
 #endif
 }
...
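[Editor's note] The option is registered with TSDB_CFG_CTYPE_B_CONFIG, so it should be settable from the configuration file like other INT8 switches. The consumer of tsClientMerge is not part of this diff, so treat the snippet below as an assumption about intended usage rather than documented behavior:

```
# taos.cfg (illustrative) -- toggle the newly registered client-side merge switch
clientMerge 1
```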
-Subproject commit edad746514b2a53a8cf6061c93b98b52a5388692
+Subproject commit 9ae793ad2d567eb11d10627b65698f612542e988
@@ -112,6 +112,7 @@
           <include>**/*Test.java</include>
         </includes>
         <excludes>
+          <exclude>**/HttpClientPoolUtilTest.java</exclude>
           <exclude>**/AppMemoryLeakTest.java</exclude>
           <exclude>**/JDBCTypeAndTypeCompareTest.java</exclude>
           <exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
...
package com.taosdata.jdbc;

import java.sql.*;

public class AbstractStatementWrapper extends AbstractStatement {
    protected Statement statement;

    public AbstractStatementWrapper(Statement statement) {
        this.statement = statement;
    }

    @Override
    public ResultSet executeQuery(String sql) throws SQLException {
        return statement.executeQuery(sql);
    }

    @Override
    public int executeUpdate(String sql) throws SQLException {
        return statement.executeUpdate(sql);
    }

    @Override
    public void close() throws SQLException {
        statement.close();
    }

    @Override
    public boolean execute(String sql) throws SQLException {
        return statement.execute(sql);
    }

    @Override
    public ResultSet getResultSet() throws SQLException {
        return statement.getResultSet();
    }

    @Override
    public int getUpdateCount() throws SQLException {
        return statement.getUpdateCount();
    }

    @Override
    public Connection getConnection() throws SQLException {
        return statement.getConnection();
    }

    @Override
    public boolean isClosed() throws SQLException {
        return statement.isClosed();
    }
}
package com.taosdata.jdbc;

import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;
import com.taosdata.jdbc.rs.RestfulConnection;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class SchemalessStatement extends AbstractStatementWrapper {
    public SchemalessStatement(Statement statement) {
        super(statement);
    }

    public void executeSchemaless(String[] strings, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
        Connection connection = this.getConnection();
        if (connection instanceof TSDBConnection) {
            TSDBConnection tsdbConnection = (TSDBConnection) connection;
            tsdbConnection.getConnector().insertLines(strings, protocolType, timestampType);
        } else if (connection instanceof RestfulConnection) {
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "restful connection is not supported currently");
        } else {
            throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown connection:" + connection.getMetaData().getURL());
        }
    }

    public void executeSchemaless(String sql, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
        executeSchemaless(new String[]{sql}, protocolType, timestampType);
    }
}
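[Editor's note] A sketch of intended usage (URL and line-protocol payload are illustrative; SchemalessInsertTest below exercises the same path). The wrapper must be given a Statement from a JNI connection, since the RESTful path deliberately throws ERROR_UNSUPPORTED_METHOD:

```java
import com.taosdata.jdbc.SchemalessStatement;
import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SchemalessExample {
    public static void main(String[] args) throws Exception {
        // JNI (jdbc:TAOS://) connection required; jdbc:TAOS-RS:// is not supported here.
        try (Connection conn = DriverManager.getConnection(
                     "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             SchemalessStatement schemaless = new SchemalessStatement(stmt)) {
            schemaless.executeSchemaless(
                    "st,t1=3i64 c1=3i64 1626006833639000000",
                    SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
        }
    }
}
```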
@@ -32,6 +32,7 @@ public class TSDBErrorNumbers {
     public static final int ERROR_USER_IS_REQUIRED = 0x2319;    // user is required
     public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a;    // password is required
     public static final int ERROR_INVALID_JSON_FORMAT = 0x231b;
+    public static final int ERROR_HTTP_ENTITY_IS_NULL = 0x231c;    // http entity is null

     public static final int ERROR_UNKNOWN = 0x2350;    // unknown error
@@ -74,6 +75,7 @@ public class TSDBErrorNumbers {
         errorNumbers.add(ERROR_USER_IS_REQUIRED);
         errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);
         errorNumbers.add(ERROR_INVALID_JSON_FORMAT);
+        errorNumbers.add(ERROR_HTTP_ENTITY_IS_NULL);
         errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
...
@@ -17,6 +17,8 @@
 package com.taosdata.jdbc;

 import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
 import com.taosdata.jdbc.utils.TaosInfo;

 import java.nio.ByteBuffer;
@@ -359,14 +361,14 @@ public class TSDBJNIConnector {
     private native int closeStmt(long stmt, long con);

-    public void insertLines(String[] lines) throws SQLException {
-        int code = insertLinesImp(lines, this.taos);
+    public void insertLines(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
+        int code = insertLinesImp(lines, this.taos, protocolType.ordinal(), timestampType.ordinal());
         if (code != TSDBConstants.JNI_SUCCESS) {
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to insertLines");
         }
     }

-    private native int insertLinesImp(String[] lines, long conn);
+    private native int insertLinesImp(String[] lines, long conn, int type, int precision);
 }
@@ -36,15 +36,15 @@ import java.util.regex.Pattern;
  * compatibility needs.
  */
 public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
-    // for jdbc preparedStatement interface
     private String rawSql;
     private Object[] parameters;
-    // for parameter binding
-    private ArrayList<ColumnInfo> colData;
-    private String tableName;
+    private long nativeStmtHandle = 0;

     private ArrayList<TableTagInfo> tableTags;
     private int tagValueLength;

-    private long nativeStmtHandle = 0;
+    private ArrayList<ColumnInfo> colData;
+    private String tableName;

     TSDBPreparedStatement(TSDBConnection connection, String sql) {
         super(connection);
@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
         preprocessSql();
     }

-    /*
-     *
-     */
-
     /**
      * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
      * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
     @Override
     public void setObject(int parameterIndex, Object x) throws SQLException {
-        if (isClosed()) {
+        if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-        }
-        if (parameterIndex < 1 && parameterIndex >= parameters.length) {
+        if (parameterIndex < 1 && parameterIndex >= parameters.length)
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
-        }
+
         parameters[parameterIndex - 1] = x;
     }
@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
     public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
         if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-        // TODO:
         throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
     }
@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
     public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
         if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
-        //TODO:
         throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
     }
@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
         if (isClosed())
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
         throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
-
     }

     @Override
@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
     ///////////////////////////////////////////////////////////////////////
     // NOTE: the following APIs are not JDBC compatible
-    // set the bind table name
+    // parameter binding
     private static class ColumnInfo {
         @SuppressWarnings("rawtypes")
         private ArrayList data;
@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
         }
     }

-    public void setTableName(String name) {
+    public void setTableName(String name) throws SQLException {
+        if (this.tableName != null) {
+            this.columnDataExecuteBatch();
+            this.columnDataClearBatchInternal();
+        }
         this.tableName = name;
     }
@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
     public void columnDataExecuteBatch() throws SQLException {
         TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
         connector.executeBatch(this.nativeStmtHandle);
-        this.columnDataClearBatch();
+        this.columnDataClearBatchInternal();
     }

+    @Deprecated
     public void columnDataClearBatch() {
+        columnDataClearBatchInternal();
+    }
+
+    private void columnDataClearBatchInternal() {
         int size = this.colData.size();
         this.colData.clear();
         this.colData.addAll(Collections.nCopies(size, null));
         this.tableName = null;   // clear the table name
     }

     public void columnDataCloseBatch() throws SQLException {
         TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
         connector.closeBatch(this.nativeStmtHandle);
@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
         this.nativeStmtHandle = 0L;
         this.tableName = null;
     }
+
+    @Override
+    public void close() throws SQLException {
+        this.columnDataClearBatchInternal();
+        this.columnDataCloseBatch();
+        super.close();
+    }
 }
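[Editor's note] setTableName now flushes any batch already bound for the previous table before switching, so a single prepared statement can bind several subtables in sequence. A sketch of the calling pattern (table/column names and the tsList/valList lists are illustrative; the full version is in ParameterBindTest below):

```java
// Assumes: create table weather (ts timestamp, f1 int) tags(t1 int)
TSDBPreparedStatement pstmt = conn.prepareStatement(
        "insert into ? using weather tags(?) values(?, ?)")
        .unwrap(TSDBPreparedStatement.class);
for (String tb : new String[]{"t1", "t2"}) {
    pstmt.setTableName(tb);        // flushes the previous table's pending batch
    pstmt.setTagInt(0, 1);
    pstmt.setTimestamp(0, tsList); // tsList/valList: previously filled ArrayLists
    pstmt.setInt(1, valList);
    pstmt.columnDataAddBatch();
}
pstmt.columnDataExecuteBatch();    // flushes the last table's batch
pstmt.close();                     // close() now also releases the native stmt handle
```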
package com.taosdata.jdbc.enums;

// NOTE: ordinal() of these constants is passed to the native layer
// (see TSDBJNIConnector.insertLines), so the declaration order matters.
public enum SchemalessProtocolType {
    UNKNOWN,
    LINE,
    TELNET,
    JSON,
    ;
}
package com.taosdata.jdbc.enums;

// NOTE: ordinal() of these constants is passed to the native layer as the
// timestamp precision, so the declaration order matters.
public enum SchemalessTimestampType {
    NOT_CONFIGURED,
    HOURS,
    MINUTES,
    SECONDS,
    MILLI_SECONDS,
    MICRO_SECONDS,
    NANO_SECONDS,
    ;
}
@@ -6,7 +6,6 @@ import org.apache.http.HeaderElement;
 import org.apache.http.HeaderElementIterator;
 import org.apache.http.HttpEntity;
 import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.HttpRequestRetryHandler;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.methods.*;
 import org.apache.http.client.protocol.HttpClientContext;
@@ -20,22 +19,19 @@ import org.apache.http.protocol.HTTP;
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.util.EntityUtils;

-import javax.net.ssl.SSLException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.UnknownHostException;
 import java.nio.charset.StandardCharsets;
 import java.sql.SQLException;

 public class HttpClientPoolUtil {

     private static final String DEFAULT_CONTENT_TYPE = "application/json";
-    private static final int DEFAULT_MAX_TOTAL = 200;
-    private static final int DEFAULT_MAX_PER_ROUTE = 20;
-    private static final int DEFAULT_TIME_OUT = 15000;
-    private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
     private static final int DEFAULT_MAX_RETRY_COUNT = 5;
+    private static final int DEFAULT_MAX_TOTAL = 50;
+    private static final int DEFAULT_MAX_PER_ROUTE = 5;
+    private static final int DEFAULT_HTTP_KEEP_TIME = -1;

     private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
         HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
         while (it.hasNext()) {
@@ -52,29 +48,18 @@ public class HttpClientPoolUtil {
         return DEFAULT_HTTP_KEEP_TIME * 1000;
     };

-    private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
-        if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
-            // do not retry if over max retry count
-            return false;
-        if (exception instanceof InterruptedIOException)
-            // timeout
-            return false;
-        if (exception instanceof UnknownHostException)
-            // unknown host
-            return false;
-        if (exception instanceof SSLException)
-            // SSL handshake exception
-            return false;
-        return true;
-    };
-
-    private static CloseableHttpClient httpClient;
+    private static final CloseableHttpClient httpClient;

     static {
         PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
         connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
         connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
-        httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
+
+        httpClient = HttpClients.custom()
+                .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
+                .setConnectionManager(connectionManager)
+                .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT)
+                .build();
     }

     /*** execute GET request ***/
@@ -118,9 +103,10 @@ public class HttpClientPoolUtil {
             HttpContext context = HttpClientContext.create();
             CloseableHttpResponse httpResponse = httpClient.execute(method, context);
             httpEntity = httpResponse.getEntity();
-            if (httpEntity != null) {
-                responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
+            if (httpEntity == null) {
+                throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data);
             }
+            responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
         } catch (ClientProtocolException e) {
             e.printStackTrace();
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
@@ -139,9 +125,6 @@ public class HttpClientPoolUtil {
     private static HttpRequestBase getRequest(String uri, String methodName) {
         HttpRequestBase method;
         RequestConfig requestConfig = RequestConfig.custom()
-                .setSocketTimeout(DEFAULT_TIME_OUT * 1000)
-                .setConnectTimeout(DEFAULT_TIME_OUT * 1000)
-                .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
                 .setExpectContinueEnabled(false)
                 .build();
         if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
...
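[Editor's note] Two behavioral consequences of the rewritten pool, stated as a reading of the code rather than documented guarantees: the new retry handler retries any IOException up to DEFAULT_MAX_RETRY_COUNT attempts (the old handler's timeout/unknown-host/SSL exclusions are gone), and with DEFAULT_HTTP_KEEP_TIME = -1 the keep-alive strategy returns a negative duration whenever the server sends no Keep-Alive timeout hint, which Apache HttpClient 4.x interprets as "reuse the connection until the server closes it":

```java
// Fallback path of DEFAULT_KEEP_ALIVE_STRATEGY after this change:
//   no "Keep-Alive: timeout=N" header  ->  DEFAULT_HTTP_KEEP_TIME * 1000 == -1000
//   a negative keep-alive duration     ->  pooled connection kept alive indefinitely
```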
@@ -121,7 +121,7 @@ public class Utils {
     }

     private static void findValuesClauseRangeSet(String preparedSql, RangeSet<Integer> clauseRangeSet) {
-        Matcher matcher = Pattern.compile("(values|,)\\s*(\\([^)]*\\))").matcher(preparedSql);
+        Matcher matcher = Pattern.compile("(values||,)\\s*(\\([^)]*\\))").matcher(preparedSql);
         while (matcher.find()) {
             int start = matcher.start(2);
             int end = matcher.end(2);
...
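[Editor's note] The empty alternative in (values||,) lets the first group match zero characters, so a parenthesized value group that follows the previous one with no separator at all — VALUES (...)(...)— is still captured; the multiValuesNoSeparator test below exercises exactly this. A standalone demonstration (class name and SQL string are illustrative):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ValuesRegexDemo {
    public static void main(String[] args) {
        // Same pattern as findValuesClauseRangeSet after this change.
        Pattern p = Pattern.compile("(values||,)\\s*(\\([^)]*\\))");
        String sql = "insert into t values(?,?)(?,?), (?,?)";
        Matcher m = p.matcher(sql);
        while (m.find()) {
            // Group 2 is the parenthesized value list; it is found even when
            // neither "values" nor "," immediately precedes it.
            System.out.println(m.group(2));
        }
        // prints: (?,?)  (?,?)  (?,?)
    }
}
```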
package com.taosdata.jdbc;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;

public class ParameterBindTest {

    private static final String host = "127.0.0.1";
    private static final String stable = "weather";

    private Connection conn;
    private final Random random = new Random(System.currentTimeMillis());

    @Test
    public void test() {
        // given
        String[] tbnames = {"t1", "t2", "t3"};
        int rows = 10;

        // when
        insertIntoTables(tbnames, rows);

        // then
        assertRows(stable, tbnames.length * rows);
        for (String t : tbnames) {
            assertRows(t, rows);
        }
    }

    @Test
    public void testMultiThreads() {
        // given
        String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}};
        int rows = 10;

        // when
        List<Thread> threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList());
        threads.forEach(Thread::start);
        for (Thread thread : threads) {
            try {
                thread.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        // then
        for (String[] table : tables) {
            for (String t : table) {
                assertRows(t, rows);
            }
        }
    }

    private void assertRows(String tbname, int rows) {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select count(*) from " + tbname);
            while (rs.next()) {
                int count = rs.getInt(1);
                Assert.assertEquals(rows, count);
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    private void insertIntoTables(String[] tbnames, int rowsEachTable) {
        long current = System.currentTimeMillis();
        String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)";
        try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            for (int i = 0; i < tbnames.length; i++) {
                // setTableName flushes the previous table's batch before rebinding
                pstmt.setTableName(tbnames[i]);
                pstmt.setTagInt(0, random.nextInt(100));
                pstmt.setTagInt(1, random.nextInt(100));

                ArrayList<Long> timestampList = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    timestampList.add(current + i * 1000 + j);
                }
                pstmt.setTimestamp(0, timestampList);

                ArrayList<Integer> f1List = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    f1List.add(random.nextInt(100));
                }
                pstmt.setInt(1, f1List);

                ArrayList<Integer> f2List = new ArrayList<>();
                for (int j = 0; j < rowsEachTable; j++) {
                    f2List.add(random.nextInt(100));
                }
                pstmt.setInt(2, f2List);

                pstmt.columnDataAddBatch();
            }

            pstmt.columnDataExecuteBatch();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Before
    public void before() {
        String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        try {
            conn = DriverManager.getConnection(url);

            Statement stmt = conn.createStatement();
            stmt.execute("drop database if exists test_pd");
            stmt.execute("create database if not exists test_pd");
            stmt.execute("use test_pd");
            stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)");
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @After
    public void after() {
        try {
            // Statement stmt = conn.createStatement();
            // stmt.execute("drop database if exists test_pd");
            if (conn != null)
                conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}
package com.taosdata.jdbc;

import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.sql.*;

public class SchemalessInsertTest {
    private String host = "127.0.0.1";
    private String dbname = "test_schemaless_insert";
    private Connection conn;

    @Test
    public void schemalessInsert() throws SQLException {
        // given
        String[] lines = new String[]{
                "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
                "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000"};

        // when
        try (Statement statement = conn.createStatement();
             SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
            schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
        }

        // then
        Statement statement = conn.createStatement();
        ResultSet rs = statement.executeQuery("show tables");
        Assert.assertNotNull(rs);
        ResultSetMetaData metaData = rs.getMetaData();
        Assert.assertTrue(metaData.getColumnCount() > 0);
        int rowCnt = 0;
        while (rs.next()) {
            rowCnt++;
        }
        Assert.assertEquals(lines.length, rowCnt);
        rs.close();
        statement.close();
    }

    @Test
    public void telnetInsert() throws SQLException {
        // given
        String[] lines = new String[]{
                "stb0_0 1626006833 4 host=host0 interface=eth0",
                "stb0_1 1626006833 4 host=host0 interface=eth0",
                "stb0_2 1626006833 4 host=host0 interface=eth0 id=\"special_name\"",
        };

        // when
        try (Statement statement = conn.createStatement();
             SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
            schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
        }

        // then
        Statement statement = conn.createStatement();
        ResultSet rs = statement.executeQuery("show tables");
        Assert.assertNotNull(rs);
        ResultSetMetaData metaData = rs.getMetaData();
        Assert.assertTrue(metaData.getColumnCount() > 0);
        int rowCnt = 0;
        while (rs.next()) {
            rowCnt++;
        }
        Assert.assertEquals(lines.length, rowCnt);
        rs.close();
        statement.close();
    }

    @Test
    public void jsonInsert() throws SQLException {
        // given
        String json = "[\n" +
                "  {\n" +
                "    \"metric\": \"cpu_load_1\",\n" +
                "    \"timestamp\": 1626006833,\n" +
                "    \"value\": 55.5,\n" +
                "    \"tags\": {\n" +
                "      \"host\": \"ubuntu\",\n" +
                "      \"interface\": \"eth1\",\n" +
                "      \"Id\": \"tb1\"\n" +
                "    }\n" +
                "  },\n" +
                "  {\n" +
                "    \"metric\": \"cpu_load_2\",\n" +
                "    \"timestamp\": 1626006834,\n" +
                "    \"value\": 55.5,\n" +
                "    \"tags\": {\n" +
                "      \"host\": \"ubuntu\",\n" +
                "      \"interface\": \"eth2\",\n" +
                "      \"Id\": \"tb2\"\n" +
                "    }\n" +
                "  }\n" +
                "]";

        // when
        try (Statement statement = conn.createStatement();
             SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
            schemalessStatement.executeSchemaless(json, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
        }

        // then
        Statement statement = conn.createStatement();
        ResultSet rs = statement.executeQuery("show tables");
        Assert.assertNotNull(rs);
        ResultSetMetaData metaData = rs.getMetaData();
        Assert.assertTrue(metaData.getColumnCount() > 0);
        int rowCnt = 0;
        while (rs.next()) {
            rowCnt++;
        }
        // Assert.assertEquals(json.length, rowCnt);
        rs.close();
        statement.close();
    }

    @Before
    public void before() {
        final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        try {
            conn = DriverManager.getConnection(url);
            Statement stmt = conn.createStatement();
            stmt.execute("drop database if exists " + dbname);
            stmt.execute("create database if not exists " + dbname + " precision 'ns'");
            stmt.execute("use " + dbname);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @After
    public void after() {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop database if exists " + dbname);
            stmt.close();
            conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}
 package com.taosdata.jdbc;

+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
 import org.junit.Test;

 import java.lang.management.ManagementFactory;
@@ -115,9 +117,10 @@ public class TSDBJNIConnectorTest {
             }
             // close statement
             connector.executeQuery("use d");
-            String[] lines = new String[]{"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
-                    "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"};
-            connector.insertLines(lines);
+            String[] lines = new String[]{
+                    "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+                    "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000"};
+            connector.insertLines(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
             // close connection
             connector.closeConnection();
...
@@ -518,7 +518,7 @@ public class SQLTest {
     @Test
     public void testCase050() {
-        String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
+        String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts";
         // when
         ResultSet rs = executeQuery(connection, sql);
         // then
...
package com.taosdata.jdbc.utils;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBError;
import org.junit.Test;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class HttpClientPoolUtilTest {

    String user = "root";
    String password = "taosdata";
    String host = "127.0.0.1";
    String dbname = "log";

    @Test
    public void test() {
        // given
        List<Thread> threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> {
            useDB();
        })).collect(Collectors.toList());

        threads.forEach(Thread::start);

        for (Thread thread : threads) {
            try {
                thread.join();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    private void useDB() {
        try {
            user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName());
            password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName());
            String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + "";
            String result = HttpClientPoolUtil.execute(loginUrl);
            JSONObject jsonResult = JSON.parseObject(result);
            String status = jsonResult.getString("status");
            String token = jsonResult.getString("desc");
            if (!status.equals("succ")) {
                throw new SQLException(jsonResult.getString("desc"));
            }

            String url = "http://" + host + ":6041/rest/sql";
            String sql = "use " + dbname;
            result = HttpClientPoolUtil.execute(url, sql, token);

            JSONObject resultJson = JSON.parseObject(result);
            if (resultJson.getString("status").equals("error")) {
                throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
            }
        } catch (UnsupportedEncodingException | SQLException e) {
            e.printStackTrace();
        }
    }
}
\ No newline at end of file
@@ -73,6 +73,48 @@ public class UtilsTest {
         Assert.assertEquals(expected, actual);
     }

+    @Test
+    public void multiValuesAndWhitespace() {
+        // given
+        String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?) (?,?,?,?)";
+        Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+        // when
+        String actual = Utils.getNativeSql(nativeSql, parameters);
+
+        // then
+        String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5) (300,3.141592,'uvw',6)";
+        Assert.assertEquals(expected, actual);
+    }
+
+    @Test
+    public void multiValuesNoSeparator() {
+        // given
+        String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?)(?,?,?,?)(?,?,?,?)";
+        Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+        // when
+        String actual = Utils.getNativeSql(nativeSql, parameters);
+
+        // then
+        String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4)(200,3.1415,'xyz',5)(300,3.141592,'uvw',6)";
+        Assert.assertEquals(expected, actual);
+    }
+
+    @Test
+    public void multiValuesMultiSeparator() {
+        // given
+        String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?), (?,?,?,?)";
+        Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+        // when
+        String actual = Utils.getNativeSql(nativeSql, parameters);
+
+        // then
+        String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5), (300,3.141592,'uvw',6)";
+        Assert.assertEquals(expected, actual);
+    }
+
     @Test
     public void lineTerminator() {
         // given
@@ -100,6 +142,32 @@ public class UtilsTest {
         Assert.assertEquals(expected, actual);
     }

+    @Test
+    public void lineTerminatorAndMultiValuesAndNoneOrMoreWhitespace() {
+        String nativeSql = "INSERT Into ? TAGS(?) VALUES(?,?,\r\n?,?),(?,? ,\r\n?,?) t? tags (?) Values (?,?,?\r\n,?) (?,?,?,?) t? Tags(?) values (?,?,?,?) , (?,?,?,?)";
+        Object[] parameters = Stream.of("t1", "abc", 100, 1.1, "xxx", "xxx", 200, 2.2, "xxx", "xxx", 2, "bcd", 300, 3.3, "xxx", "xxx", 400, 4.4, "xxx", "xxx", 3, "cde", 500, 5.5, "xxx", "xxx", 600, 6.6, "xxx", "xxx").toArray();
+
+        // when
+        String actual = Utils.getNativeSql(nativeSql, parameters);
+
+        // then
+        String expected = "INSERT Into t1 TAGS('abc') VALUES(100,1.1,\r\n'xxx','xxx'),(200,2.2 ,\r\n'xxx','xxx') t2 tags ('bcd') Values (300,3.3,'xxx'\r\n,'xxx') (400,4.4,'xxx','xxx') t3 Tags('cde') values (500,5.5,'xxx','xxx') , (600,6.6,'xxx','xxx')";
+        Assert.assertEquals(expected, actual);
+    }
+
+    @Test
+    public void multiValuesAndNoneOrMoreWhitespace() {
+        String nativeSql = "INSERT INTO ? USING traces TAGS (?, ?) VALUES (?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?)";
+        Object[] parameters = Stream.of("t1", "t1", "t2", 1632968284000L, 111.111, 119.001, 0.4, 90, 99.1, "WGS84", 1632968285000L, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, "WGS84").toArray();
+
+        // when
+        String actual = Utils.getNativeSql(nativeSql, parameters);
+
+        // then
+        String expected = "INSERT INTO t1 USING traces TAGS ('t1', 't2') VALUES (1632968284000, 111.111, 119.001, 0.4, 90, 99.1, 'WGS84') (1632968285000, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, 'WGS84')";
+        Assert.assertEquals(expected, actual);
+    }
+
     @Test
     public void replaceNothing() {
         // given
...
@@ -212,6 +212,7 @@
 #define TK_INSERT 194
 #define TK_INTO 195
 #define TK_VALUES 196
+#define TK_FILE 197
@@ -223,7 +224,6 @@
 #define TK_HEX 303   // hex number  0x123
 #define TK_OCT 304   // oct number
 #define TK_BIN 305   // bin format data 0b111
-#define TK_FILE 306
 #define TK_QUESTION 307   // denoting the placeholder of "?",when invoking statement bind query
 #endif
...
@@ -26,6 +26,8 @@ ENDIF ()
   SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
 ELSEIF (TD_WINDOWS)
+  ADD_DEFINITIONS(-DUNICODE)
+  ADD_DEFINITIONS(-D_UNICODE)
   LIST(APPEND SRC ./src/shellEngine.c)
   LIST(APPEND SRC ./src/shellMain.c)
   LIST(APPEND SRC ./src/shellWindows.c)
...
@@ -95,6 +95,9 @@ SShellArguments args = {
  */
 int main(int argc, char* argv[]) {
   /*setlocale(LC_ALL, "en_US.UTF-8"); */
+#ifdef WINDOWS
+  SetConsoleOutputCP(CP_UTF8);
+#endif
   if (!checkVersion()) {
     exit(EXIT_FAILURE);
...
@@ -272,13 +272,16 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
   cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE);

   // Read input.
-  char c;
+  void *console = GetStdHandle(STD_INPUT_HANDLE);
+  unsigned long read;
+  wchar_t c;
+  char mbStr[16];
   while (1) {
-    c = getchar();
+    int ret = ReadConsole(console, &c, 1, &read, NULL);
+    int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL);
+    mbStr[size] = 0;
     switch (c) {
       case '\n':
-      case '\r':
         if (isReadyGo(&cmd)) {
           sprintf(command, "%s%s", cmd.buffer, cmd.command);
           free(cmd.buffer);
@@ -291,8 +294,12 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
           updateBuffer(&cmd);
         }
         break;
+      case '\r':
+        break;
       default:
-        insertChar(&cmd, c);
+        for (int i = 0; i < size; ++i) {
+          insertChar(&cmd, mbStr[i]);
+        }
     }
   }
...
@@ -3567,8 +3567,10 @@ static int postProceSql(char *host, uint16_t port,
             break;
         received += bytes;

-        response_buf[RESP_BUF_LEN - 1] = '\0';
+        verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
+                __func__, __LINE__, received, resp_len, response_buf);

+        response_buf[RESP_BUF_LEN - 1] = '\0';
         if (strlen(response_buf)) {
             verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
                     __func__, __LINE__, received, resp_len, response_buf);
@@ -6611,7 +6613,6 @@ static int getRowDataFromSample(
                 stbInfo->sampleDataBuf
                 + stbInfo->lenOfOneRow * (*sampleUsePos));
     }
-
     dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");

     (*sampleUsePos)++;
@@ -11211,7 +11212,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
             pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
         }
     */
-
         if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
 #ifdef WINDOWS
             WSADATA wsaData;
@@ -11237,7 +11237,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
             pThreadInfo->sockfd = sockfd;
         }
         tsem_init(&(pThreadInfo->lock_sem), 0, 0);
-
         if (ASYNC_MODE == g_Dbs.asyncMode) {
             pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
...
@@ -140,28 +140,29 @@ typedef enum {
 } EHTTP_CONTEXT_FAILED_CAUSE;

 typedef struct HttpContext {
   int32_t            refCount;
   SOCKET             fd;
   uint32_t           accessTimes;
   uint32_t           lastAccessTime;
   int32_t            state;
   uint8_t            reqType;
   uint8_t            parsed;
   uint8_t            error;
   char               ipstr[22];
   char               user[TSDB_USER_LEN];  // parsed from auth token or login message
   char               pass[HTTP_PASSWORD_LEN];
   char               db[/*TSDB_ACCT_ID_LEN + */ TSDB_DB_NAME_LEN];
   TAOS *             taos;
   void *             ppContext;
+  pthread_mutex_t    ctxMutex;
   HttpSession *      session;
   z_stream           gzipStream;
   HttpParser *       parser;
   HttpSqlCmd         singleCmd;
   HttpSqlCmds *      multiCmds;
   JsonBuf *          jsonBuf;
   HttpEncodeMethod * encodeMethod;
   HttpDecodeMethod * decodeMethod;
   struct HttpThread *pThread;
 } HttpContext;
...
@@ -67,6 +67,8 @@ static void httpDestroyContext(void *data) {
     pContext->parser = NULL;
   }

+  pthread_mutex_destroy(&pContext->ctxMutex);
+
   tfree(pContext);
 }
@@ -128,6 +130,8 @@ HttpContext *httpCreateContext(SOCKET fd) {
   // set the ref to 0
   taosCacheRelease(tsHttpServer.contextCache, (void **)&ppContext, false);

+  pthread_mutex_init(&pContext->ctxMutex, NULL);
+
   return pContext;
 }
...
@@ -45,15 +45,14 @@ bool httpProcessData(HttpContext* pContext) {
     httpTrace("context:%p, fd:%d, process options request", pContext, pContext->fd);
     httpSendOptionResp(pContext, "process options request success");
   } else {
-    if (!httpDecodeRequest(pContext)) {
-      /*
-       * httpCloseContextByApp has been called when parsing the error
-       */
-      // httpCloseContextByApp(pContext);
-    } else {
+    pthread_mutex_lock(&pContext->ctxMutex);
+
+    if (httpDecodeRequest(pContext)) {
       httpClearParser(pContext->parser);
       httpProcessRequest(pContext);
     }
+
+    pthread_mutex_unlock(&pContext->ctxMutex);
   }

   return true;
...
@@ -406,7 +406,14 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) {
     if (pContext->session == NULL) {
       httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL);
     } else {
+      // httpProcessRequestCb may run on more than one thread for the same context.
+      // If one invocation executes the memset in httpExecCmd just before another
+      // thread enters a *Cmd function, nativSql would be NULL, so serialize the
+      // execution with ctxMutex.
+      pthread_mutex_lock(&pContext->ctxMutex);
       httpExecCmd(pContext);
+      pthread_mutex_unlock(&pContext->ctxMutex);
     }
   }
...
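[Editor's note] The fix is the classic "serialize a non-reentrant critical section per object" pattern. A self-contained toy of the same shape (names are illustrative, not the real HTTP module):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for HttpContext: cmd must not be reset while another
 * thread is still reading it. */
typedef struct {
  pthread_mutex_t ctxMutex;
  char            cmd[32];
} Ctx;

static void *worker(void *arg) {
  Ctx *ctx = (Ctx *)arg;
  pthread_mutex_lock(&ctx->ctxMutex);            /* decode + exec as one unit */
  snprintf(ctx->cmd, sizeof(ctx->cmd), "use db"); /* decode/prepare step */
  printf("exec: %s\n", ctx->cmd);                 /* execute step */
  memset(ctx->cmd, 0, sizeof(ctx->cmd));          /* the reset that raced before */
  pthread_mutex_unlock(&ctx->ctxMutex);
  return NULL;
}

int main(void) {
  Ctx ctx;
  pthread_mutex_init(&ctx.ctxMutex, NULL);
  pthread_t t1, t2;
  pthread_create(&t1, NULL, worker, &ctx);
  pthread_create(&t2, NULL, worker, &ctx);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  pthread_mutex_destroy(&ctx.ctxMutex);
  return 0;
}
```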
@@ -229,6 +229,7 @@ typedef struct SQueryAttr {
   bool              stateWindow;   // window State on sub/normal table
   bool              createFilterOperator; // if filter operator is needed
   bool              multigroupResult; // multigroup result can exist in one SSDataBlock
+  bool              needSort;      // need sort rowRes
   int32_t           interBufSize;  // intermediate buffer sizse
   int32_t           havingNum;     // having expr number
...
@@ -152,7 +152,8 @@ typedef struct SQueryInfo {
   struct SQueryInfo *pDownstream;
   int32_t           havingFieldNum;
   bool              stableQuery;
   bool              groupbyColumn;
+  bool              groupbyTag;
   bool              simpleAgg;
   bool              arithmeticOnAgg;
   bool              projectionQuery;
...
@@ -921,4 +921,4 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y).   {X.n += (Z.n + Y.n); s
 %fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
   DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
   LIKE MATCH NMATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
-  NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES.
+  NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES FILE.
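[Editor's note] Renumbering TK_FILE into the grammar-token range (197, next to INSERT/INTO/VALUES) and adding FILE to %fallback suggests FILE is now a real keyword in insert statements while still degrading to an ordinary identifier elsewhere. The statement this enables looks like the following (path is illustrative):

```sql
-- import rows from a CSV file; FILE now parses as a keyword here
INSERT INTO d1001 FILE '/tmp/csvfile.csv';
```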
@@ -1313,9 +1313,6 @@ static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
    if (pCtx[k].currentStage == MERGE_STAGE) {
      pCtx[k].order = TSDB_ORDER_ASC;
    }
-   pCtx[k].startTs = pQueryAttr->window.skey;
    if (pCtx[k].functionId < 0) {
      // load the script and exec
      SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo;
@@ -5991,6 +5988,18 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
  return NULL;
}

+static int32_t resRowCompare(const void *r1, const void *r2) {
+  SResultRow *res1 = *(SResultRow **)r1;
+  SResultRow *res2 = *(SResultRow **)r2;
+  if (res1->win.skey == res2->win.skey) {
+    return 0;
+  } else {
+    return res1->win.skey > res2->win.skey ? 1 : -1;
+  }
+}
+
static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
  SOperatorInfo* pOperator = (SOperatorInfo*) param;
  if (pOperator->status == OP_EXEC_DONE) {
@@ -6036,6 +6045,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
    pQueryAttr->window = win;
    pOperator->status = OP_RES_TO_RETURN;

+   if (pIntervalInfo->resultRowInfo.size > 0 && pQueryAttr->needSort) {
+     qsort(pIntervalInfo->resultRowInfo.pResult, pIntervalInfo->resultRowInfo.size, POINTER_BYTES, resRowCompare);
+   }
+
    closeAllResultRows(&pIntervalInfo->resultRowInfo);
    setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
    finalizeQueryResult(pOperator, pIntervalInfo->pCtx, &pIntervalInfo->resultRowInfo, pIntervalInfo->rowCellInfoOffset);
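resRowCompare above receives pointers into an array of SResultRow pointers, so each argument must be dereferenced twice before the window keys can be compared. A minimal self-contained sketch of the same qsort pattern, with an invented SRow type standing in for SResultRow:

#include <stdio.h>
#include <stdlib.h>

typedef struct SRow { long skey; } SRow;  /* stand-in for SResultRow */

static int rowPtrCompare(const void *r1, const void *r2) {
  const SRow *a = *(const SRow *const *)r1;  /* element is SRow*, so deref twice */
  const SRow *b = *(const SRow *const *)r2;
  if (a->skey == b->skey) return 0;
  return a->skey > b->skey ? 1 : -1;
}

int main(void) {
  SRow  r1 = {30}, r2 = {10}, r3 = {20};
  SRow *rows[] = {&r1, &r2, &r3};
  qsort(rows, 3, sizeof(SRow *), rowPtrCompare);  /* element size is the pointer size */
  for (int i = 0; i < 3; i++) printf("%ld\n", rows[i]->skey);  /* prints 10 20 30 */
  return 0;
}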
...
@@ -67,7 +67,6 @@ SSqlInfo qSqlParse(const char *pStr) {
        sqlInfo.valid = false;
        goto abort_parse;
      }
-
      default:
        Parse(pParser, t0.type, t0, &sqlInfo);
        if (sqlInfo.valid == false) {
...
This diff is collapsed.
@@ -41,6 +41,7 @@ typedef struct STable {
  int16_t restoreColumnNum;
  bool    hasRestoreLastColumn;
  int     lastColSVersion;
+ int16_t cacheLastConfigVersion;
  T_REF_DECLARE()
} STable;
...
@@ -79,8 +79,8 @@ struct STsdbRepo {
  STsdbCfg        save_config;    // save apply config
  bool            config_changed; // config changed flag
  pthread_mutex_t save_mutex;     // protect save config
- uint8_t         hasCachedLastColumn;
+ int16_t         cacheLastConfigVersion;
  STsdbAppH appH;
  STsdbStat stat;
@@ -111,7 +111,8 @@ int tsdbUnlockRepo(STsdbRepo* pRepo);
STsdbMeta* tsdbGetMeta(STsdbRepo* pRepo);
int        tsdbCheckCommit(STsdbRepo* pRepo);
int        tsdbRestoreInfo(STsdbRepo* pRepo);
-int        tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
+UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
+int32_t    tsdbLoadLastCache(STsdbRepo *pRepo, STable* pTable);
void       tsdbGetRootDir(int repoid, char dirName[]);
void       tsdbGetDataDir(int repoid, char dirName[]);
...
@@ -1468,7 +1468,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
    for (int i = 0; i < pDataCols->numOfCols; i++) {
      //TODO: dataColAppendVal may fail
      dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
-                      pTarget->maxPoints);
+                      pTarget->maxPoints, 0);
    }
    pTarget->numOfRows++;
@@ -1480,7 +1480,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
      ASSERT(pSchema != NULL);
    }
-   tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
+   tdAppendMemRowToDataCol(row, pSchema, pTarget, true, 0);
    tSkipListIterNext(pCommitIter->pIter);
  } else {
@@ -1489,7 +1489,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
      for (int i = 0; i < pDataCols->numOfCols; i++) {
        //TODO: dataColAppendVal may fail
        dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
-                        pTarget->maxPoints);
+                        pTarget->maxPoints, 0);
      }
      if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++;
@@ -1502,7 +1502,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
        ASSERT(pSchema != NULL);
      }
-     tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
+     tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE,
+                             update != TD_ROW_PARTIAL_UPDATE ? 0 : -1);
    }
    (*iter)++;
    tSkipListIterNext(pCommitIter->pIter);
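These call sites thread an extra argument through the row-merge path so a partial update can keep existing values instead of force-overwriting them. A minimal sketch of overwrite/discard/partial merge policies on a toy row (MergePolicy, mergeRow, and the NIL sentinel are illustrative names, not TDengine's API):

#include <stdio.h>

#define NIL -1  /* toy NULL sentinel */
typedef enum { ROW_DISCARD, ROW_OVERWRITE, ROW_PARTIAL } MergePolicy;

/* Merge an incoming row into an existing row of n columns. */
static void mergeRow(int *dst, const int *src, int n, MergePolicy p) {
  for (int i = 0; i < n; i++) {
    switch (p) {
      case ROW_DISCARD:   /* keep the old row untouched */      break;
      case ROW_OVERWRITE: dst[i] = src[i];                      break;
      case ROW_PARTIAL:   if (src[i] != NIL) dst[i] = src[i];   break;
    }
  }
}

int main(void) {
  int oldRow[3] = {1, 2, 3};
  int newRow[3] = {9, NIL, 7};  /* second column absent in the update */
  mergeRow(oldRow, newRow, 3, ROW_PARTIAL);
  printf("%d %d %d\n", oldRow[0], oldRow[1], oldRow[2]);  /* 9 2 7 */
  return 0;
}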
...
@@ -146,7 +146,9 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
  if (oldCfg.cacheLastRow != pRepo->config.cacheLastRow) {
    if (tsdbLockRepo(pRepo) < 0) return;
-   tsdbCacheLastData(pRepo, &oldCfg);
+   // tsdbCacheLastData(pRepo, &oldCfg);
+   // lazy load last cache when query or update
+   ++pRepo->cacheLastConfigVersion;
    tsdbUnlockRepo(pRepo);
  }
...
@@ -67,7 +67,9 @@ int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); }
void *tsdbCompactImpl(STsdbRepo *pRepo) {
  // Check if there are files in TSDB FS to compact
  if (REPO_FS(pRepo)->cstatus->pmf == NULL) {
-   tsdbInfo("vgId:%d no file to compact in FS", REPO_ID(pRepo));
+   pRepo->compactState = TSDB_NO_COMPACT;
+   tsem_post(&(pRepo->readyToCommit));
+   tsdbInfo("vgId:%d compact over, no file to compact in FS", REPO_ID(pRepo));
    return NULL;
  }
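The fix above makes the early-return path release the readyToCommit semaphore, so a commit waiting on it cannot block forever. A minimal sketch of that pattern with plain POSIX semaphores rather than TDengine's tsem wrappers (names are illustrative):

#include <semaphore.h>
#include <stdio.h>

static sem_t readyToCommit;

/* Every exit path must post, or the next sem_wait() blocks forever. */
static void *compact(int haveFiles) {
  if (!haveFiles) {
    sem_post(&readyToCommit);  /* the early return releases the slot too */
    printf("nothing to compact\n");
    return NULL;
  }
  printf("compacting...\n");
  sem_post(&readyToCommit);
  return NULL;
}

int main(void) {
  sem_init(&readyToCommit, 0, 0);
  compact(0);
  sem_wait(&readyToCommit);  /* would deadlock without the early-return post */
  printf("commit may proceed\n");
  sem_destroy(&readyToCommit);
  return 0;
}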
...
@@ -576,7 +576,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
    return NULL;
  }
  pRepo->config_changed = false;
- atomic_store_8(&pRepo->hasCachedLastColumn, 0);
+ pRepo->cacheLastConfigVersion = 0;
  code = tsem_init(&(pRepo->readyToCommit), 0, 1);
  if (code != 0) {
@@ -726,7 +726,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
      loadStatisData = true;
    }
  }
+ TSDB_WLOCK_TABLE(pTable);  // lock while updating pTable->lastCols[]
  for (int16_t i = 0; i < numColumns && numColumns > pTable->restoreColumnNum; ++i) {
    STColumn *pCol = schemaColAt(pSchema, i);
    // ignore loaded columns
@@ -775,6 +775,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
      break;
    }
  }
+ TSDB_WUNLOCK_TABLE(pTable);
}

out:
@@ -803,20 +804,33 @@ static int tsdbRestoreLastRow(STsdbRepo *pRepo, STable *pTable, SReadH* pReadh,
  // Get the data in row
  STSchema *pSchema = tsdbGetTableSchema(pTable);
- pTable->lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
- if (pTable->lastRow == NULL) {
+ SMemRow   lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
+ if (lastRow == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    return -1;
  }
- memRowSetType(pTable->lastRow, SMEM_ROW_DATA);
- tdInitDataRow(memRowDataBody(pTable->lastRow), pSchema);
+ memRowSetType(lastRow, SMEM_ROW_DATA);
+ tdInitDataRow(memRowDataBody(lastRow), pSchema);
  for (int icol = 0; icol < schemaNCols(pSchema); icol++) {
    STColumn *pCol = schemaColAt(pSchema, icol);
    SDataCol *pDataCol = pReadh->pDCols[0]->cols + icol;
-   tdAppendColVal(memRowDataBody(pTable->lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
+   tdAppendColVal(memRowDataBody(lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
                   pCol->offset);
  }

+ TSKEY lastKey = memRowKey(lastRow);
+
+ // while the data was being loaded from file, new rows may have been inserted and the
+ // last row already updated
+ TSDB_WLOCK_TABLE(pTable);
+ if (pTable->lastRow == NULL) {
+   pTable->lastKey = lastKey;
+   pTable->lastRow = lastRow;
+   TSDB_WUNLOCK_TABLE(pTable);
+ } else {
+   TSDB_WUNLOCK_TABLE(pTable);
+   taosTZfree(lastRow);
+ }
+
  return 0;
}
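The hunk above builds the candidate row outside the lock, then publishes it only if no concurrent writer installed one first, freeing the loser. A minimal sketch of this prepare-then-publish idiom (Table and restoreLastRow are illustrative names, not the tsdb API):

#include <pthread.h>
#include <stdlib.h>

typedef struct { pthread_rwlock_t lock; char *lastRow; } Table;

/* Build outside the lock; publish under the lock only if still unset. */
static int restoreLastRow(Table *t) {
  char *candidate = malloc(64);         /* expensive work happens unlocked */
  if (candidate == NULL) return -1;

  pthread_rwlock_wrlock(&t->lock);
  if (t->lastRow == NULL) {
    t->lastRow = candidate;             /* we won: install the row */
    pthread_rwlock_unlock(&t->lock);
  } else {
    pthread_rwlock_unlock(&t->lock);    /* a writer beat us: discard ours */
    free(candidate);
  }
  return 0;
}

int main(void) {
  Table t = {PTHREAD_RWLOCK_INITIALIZER, NULL};
  restoreLastRow(&t);
  free(t.lastRow);
  return 0;
}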
@@ -887,14 +901,105 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
  tsdbDestroyReadH(&readh);

- if (CACHE_LAST_NULL_COLUMN(pCfg)) {
-   atomic_store_8(&pRepo->hasCachedLastColumn, 1);
- }
+ // if (CACHE_LAST_NULL_COLUMN(pCfg)) {
+ //   atomic_store_8(&pRepo->hasCachedLastColumn, 1);
+ // }

  return 0;
}
int32_t tsdbLoadLastCache(STsdbRepo *pRepo, STable *pTable) {
SFSIter fsiter;
SReadH readh;
SDFileSet *pSet;
int cacheLastRowTableNum = 0;
int cacheLastColTableNum = 0;
bool cacheLastRow = CACHE_LAST_ROW(&(pRepo->config));
bool cacheLastCol = CACHE_LAST_NULL_COLUMN(&(pRepo->config));
tsdbDebug("tsdbLoadLastCache for %s, cacheLastRow:%d, cacheLastCol:%d", pTable->name->data, cacheLastRow, cacheLastCol);
pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
if (!cacheLastRow && pTable->lastRow != NULL) {
taosTZfree(pTable->lastRow);
pTable->lastRow = NULL;
}
if (!cacheLastCol && pTable->lastCols != NULL) {
tsdbFreeLastColumns(pTable);
}
if (!cacheLastRow && !cacheLastCol) {
return 0;
}
cacheLastRowTableNum = (cacheLastRow && pTable->lastRow == NULL) ? 1 : 0;
cacheLastColTableNum = (cacheLastCol && pTable->lastCols == NULL) ? 1 : 0;
if (cacheLastRowTableNum == 0 && cacheLastColTableNum == 0) {
return 0;
}
if (tsdbInitReadH(&readh, pRepo) < 0) {
return -1;
}
tsdbRLockFS(REPO_FS(pRepo));
tsdbFSIterInit(&fsiter, REPO_FS(pRepo), TSDB_FS_ITER_BACKWARD);
while ((cacheLastRowTableNum > 0 || cacheLastColTableNum > 0) && (pSet = tsdbFSIterNext(&fsiter)) != NULL) {
if (tsdbSetAndOpenReadFSet(&readh, pSet) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
if (tsdbLoadBlockIdx(&readh) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
// tsdbDebug("tsdbRestoreInfo restore vgId:%d,table:%s", REPO_ID(pRepo), pTable->name->data);
if (tsdbSetReadTable(&readh, pTable) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
SBlockIdx *pIdx = readh.pBlkIdx;
if (pIdx && (cacheLastRowTableNum > 0) && (pTable->lastRow == NULL)) {
if (tsdbRestoreLastRow(pRepo, pTable, &readh, pIdx) != 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
cacheLastRowTableNum -= 1;
}
// restore NULL columns
if (pIdx && (cacheLastColTableNum > 0) && !pTable->hasRestoreLastColumn) {
if (tsdbRestoreLastColumns(pRepo, pTable, &readh) != 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
if (pTable->hasRestoreLastColumn) {
cacheLastColTableNum -= 1;
}
}
}
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
  return 0;
}
-int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
+UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
  bool cacheLastRow = false, cacheLastCol = false;
  SFSIter fsiter;
  SReadH readh;
@@ -928,9 +1033,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
  // if close last option,need to free data
  if (need_free_last_row || need_free_last_col) {
-   if (need_free_last_col) {
-     atomic_store_8(&pRepo->hasCachedLastColumn, 0);
-   }
+   // if (need_free_last_col) {
+   //   atomic_store_8(&pRepo->hasCachedLastColumn, 0);
+   // }
    tsdbInfo("free cache last data since cacheLast option changed");
    for (int i = 1; i <= maxTableIdx; i++) {
      STable *pTable = pMeta->tables[i];
@@ -1008,9 +1113,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
  tsdbDestroyReadH(&readh);

- if (cacheLastCol) {
-   atomic_store_8(&pRepo->hasCachedLastColumn, 1);
- }
+ // if (cacheLastCol) {
+ //   atomic_store_8(&pRepo->hasCachedLastColumn, 1);
+ // }

  return 0;
}
@@ -594,7 +594,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
      }
    }
-   tdAppendMemRowToDataCol(row, *ppSchema, pCols, true);
+   tdAppendMemRowToDataCol(row, *ppSchema, pCols, true, 0);
  }
  return 0;
@@ -1001,7 +1001,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
    if ((value == NULL) || isNull(value, pTCol->type)) {
      continue;
    }
+   // lock
+   TSDB_WLOCK_TABLE(pTable);
    SDataCol *pDataCol = &(pLatestCols[idx]);
    if (pDataCol->pData == NULL) {
      pDataCol->pData = malloc(pTCol->bytes);
@@ -1017,6 +1018,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
    memcpy(pDataCol->pData, value, bytes);
    //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
    pDataCol->ts = memRowKey(row);
+   // unlock
+   TSDB_WUNLOCK_TABLE(pTable);
  }
}
@@ -1063,5 +1066,8 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r
      updateTableLatestColumn(pRepo, pTable, row);
    }
  }
+
+ pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
+
  return 0;
}
@@ -639,27 +639,30 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
}

int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) {
- ASSERT(pTable->lastCols == NULL);
-
- int16_t numOfColumn = pSchema->numOfCols;
-
- pTable->lastCols = (SDataCol*)malloc(numOfColumn * sizeof(SDataCol));
- if (pTable->lastCols == NULL) {
-   return -1;
- }
-
- for (int16_t i = 0; i < numOfColumn; ++i) {
-   STColumn *pCol = schemaColAt(pSchema, i);
-   SDataCol* pDataCol = &(pTable->lastCols[i]);
-   pDataCol->bytes = 0;
-   pDataCol->pData = NULL;
-   pDataCol->colId = pCol->colId;
- }
-
- pTable->lastColSVersion = schemaVersion(pSchema);
- pTable->maxColNum = numOfColumn;
- pTable->restoreColumnNum = 0;
- pTable->hasRestoreLastColumn = false;
+ TSDB_WLOCK_TABLE(pTable);
+ if (pTable->lastCols == NULL) {
+   int16_t numOfColumn = pSchema->numOfCols;
+
+   pTable->lastCols = (SDataCol *)malloc(numOfColumn * sizeof(SDataCol));
+   if (pTable->lastCols == NULL) {
+     TSDB_WUNLOCK_TABLE(pTable);
+     return -1;
+   }
+
+   for (int16_t i = 0; i < numOfColumn; ++i) {
+     STColumn *pCol = schemaColAt(pSchema, i);
+     SDataCol *pDataCol = &(pTable->lastCols[i]);
+     pDataCol->bytes = 0;
+     pDataCol->pData = NULL;
+     pDataCol->colId = pCol->colId;
+   }
+
+   pTable->lastColSVersion = schemaVersion(pSchema);
+   pTable->maxColNum = numOfColumn;
+   pTable->restoreColumnNum = 0;
+   pTable->hasRestoreLastColumn = false;
+ }
+ TSDB_WUNLOCK_TABLE(pTable);

  return 0;
}
@@ -809,6 +812,7 @@ static STable *tsdbNewTable() {
  pTable->lastCols = NULL;
  pTable->restoreColumnNum = 0;
+ pTable->cacheLastConfigVersion = 0;
  pTable->maxColNum = 0;
  pTable->hasRestoreLastColumn = false;
  pTable->lastColSVersion = -1;
...
@@ -157,6 +157,7 @@ typedef struct STableGroupSupporter {
static STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList);
static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList);
static int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle);
+static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle);
static int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey);

static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
@@ -591,6 +592,28 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
  pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle) {
STsdbRepo* pRepo = pQueryHandle->pTsdb;
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
int32_t code = 0;
for (size_t i = 0; i < numOfTables; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
STable* pTable = pCheckInfo->pTableObj;
if (pTable->cacheLastConfigVersion == pRepo->cacheLastConfigVersion) {
continue;
}
code = tsdbLoadLastCache(pRepo, pTable);
if (code != 0) {
tsdbError("%p uid:%" PRId64 ", tid:%d, failed to load last cache since %s", pQueryHandle, pTable->tableId.uid,
pTable->tableId.tid, tstrerror(terrno));
break;
}
}
return code;
}
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
  pCond->twindow = updateLastrowForEachGroup(groupList);
@@ -604,6 +627,8 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
    return NULL;
  }

+ lazyLoadCacheLast(pQueryHandle);
+
  int32_t code = checkForCachedLastRow(pQueryHandle, groupList);
  if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
    terrno = code;
@@ -618,13 +643,14 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
  return pQueryHandle;
}

TsdbQueryHandleT tsdbQueryCacheLast(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
  STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
  if (pQueryHandle == NULL) {
    return NULL;
  }

+ lazyLoadCacheLast(pQueryHandle);
+
  int32_t code = checkForCachedLast(pQueryHandle);
  if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
    terrno = code;
@@ -2758,6 +2784,9 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
    }

    int32_t i = 0, j = 0;
+   // lock pTable->lastCols[i], since it may be released on schema update (tsdbUpdateLastColSchema)
+   TSDB_RLOCK_TABLE(pTable);
    while(i < tgNumOfCols && j < numOfCols) {
      pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
      if (pTable->lastCols[j].colId < pColInfo->info.colId) {
@@ -2844,6 +2873,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
      i++;
      j++;
    }
+   TSDB_RUNLOCK_TABLE(pTable);

    // leave the real ts column as the last row, because last function only (not stable) use the last row as res
    if (priKey != TSKEY_INITIAL_VAL) {
@@ -3175,7 +3205,9 @@ int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle) {
  int32_t code = 0;

- if (pQueryHandle->pTsdb && atomic_load_8(&pQueryHandle->pTsdb->hasCachedLastColumn)){
+ STsdbRepo* pRepo = pQueryHandle->pTsdb;
+
+ if (pRepo && CACHE_LAST_NULL_COLUMN(&(pRepo->config))) {
    pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LAST;
  }
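lazyLoadCacheLast relies on a monotonically increasing config version: each table remembers the version it last loaded under, and a mismatch with the repo-level counter triggers a reload on the next query. A minimal sketch of this stamp-and-compare idiom (Repo, Table, and lazyLoad are illustrative names, not the tsdb API):

#include <stdio.h>

typedef struct { int cacheVersion; } Repo;
typedef struct { int cacheVersion; int loads; } Table;

/* Reload a table's cache only if its stamp is stale. */
static void lazyLoad(Repo *repo, Table *t) {
  if (t->cacheVersion == repo->cacheVersion) return;  /* up to date: skip */
  t->loads++;                                         /* expensive reload would happen here */
  t->cacheVersion = repo->cacheVersion;               /* re-stamp */
}

int main(void) {
  Repo  repo = {0};
  Table t    = {0, 0};
  lazyLoad(&repo, &t);   /* versions match (0 == 0): no reload */
  repo.cacheVersion++;   /* config changed, e.g. cacheLast toggled */
  lazyLoad(&repo, &t);   /* stale stamp: reloads once */
  lazyLoad(&repo, &t);   /* already re-stamped: no reload */
  printf("loads = %d\n", t.loads);  /* prints 1 */
  return 0;
}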
...
@@ -20,7 +20,7 @@
extern "C" {
#endif

-#define TSDB_CFG_MAX_NUM    125
+#define TSDB_CFG_MAX_NUM    128
#define TSDB_CFG_PRINT_LEN  23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN  41
...
@@ -346,7 +346,7 @@ int tsCompressBoolImp(const char *const input, const int nelements, char *const
      /* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */
      output[pos] |= t;
    } else {
-     uError("Invalid compress bool value:%d", output[pos]);
+     uError("Invalid compress bool value:%d", input[i]);
      return -1;
    }
  }
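The log fix above reports the offending input byte rather than the partially built output byte. For context, a minimal sketch of the bit-packing idea behind boolean compression, eight flags per output byte (simplified LSB-first packing, not the actual tsCompressBoolImp, which packs MSB-first):

#include <stdint.h>
#include <stdio.h>

/* Pack nelements 0/1 bytes into ceil(n/8) bytes; returns -1 on bad input. */
static int packBools(const char *input, int nelements, uint8_t *output) {
  for (int i = 0; i < nelements; i++) {
    int pos = i / 8;
    if (i % 8 == 0) output[pos] = 0;
    if (input[i] == 1) {
      output[pos] |= (uint8_t)(1u << (i % 8));
    } else if (input[i] != 0) {
      fprintf(stderr, "invalid bool value:%d\n", input[i]);  /* report the input, not the output */
      return -1;
    }
  }
  return (nelements + 7) / 8;
}

int main(void) {
  char    in[] = {1, 0, 1, 1, 0, 0, 0, 1, 1};
  uint8_t out[2];
  int n = packBools(in, 9, out);
  printf("%d bytes: 0x%02x 0x%02x\n", n, out[0], out[1]);  /* 2 bytes: 0x8d 0x01 */
  return 0;
}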
...
@@ -144,7 +144,6 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
  // backward to put the first data
  hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
  tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
-
  for (int level = 0; level < pSkipList->maxLevel; level++) {
@@ -163,7 +162,12 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
      for (int i = 0; i < pSkipList->maxLevel; i++) {
        forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
      }
+   } else if (compare == 0) {
+     // equal keys need special handling
+     forward[0] = SL_NODE_GET_BACKWARD_POINTER(SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, 0), 0);
+     hasDup = true;
    } else {
+     SSkipListNode *p = NULL;
      SSkipListNode *px = pSkipList->pHead;
      for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
        if (i < pSkipList->level) {
@@ -175,19 +179,29 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
          }
        }
-       SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i);
+       // if px is not the head, p must start at px itself so px gets compared too
+       if (px == pSkipList->pHead) {
+         p = SL_NODE_GET_FORWARD_POINTER(px, i);
+       } else {
+         p = px;
+       }
        while (p != pSkipList->pTail) {
          pKey = SL_GET_NODE_KEY(pSkipList, p);
          compare = pSkipList->comparFn(pKey, pDataKey);
          if (compare >= 0) {
-           if (compare == 0 && !hasDup) hasDup = true;
+           if (compare == 0) {
+             hasDup = true;
+             forward[0] = SL_NODE_GET_BACKWARD_POINTER(p, 0);
+           }
            break;
          } else {
            px = p;
            p = SL_NODE_GET_FORWARD_POINTER(px, i);
          }
        }
+       // once a duplicate is found, break immediately; the remaining forward[i]
+       // values need not be set
+       if (hasDup) break;
      }
      forward[i] = px;
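The hunk above makes the batch-put remember the exact level-0 predecessor of an equal key instead of merely flagging the duplicate. The underlying idea, stripped of skip-list levels, is an ordered search that tracks both the predecessor and whether an equal key was seen; a minimal single-level sketch (illustrative only, not the tskiplist API):

#include <stdbool.h>
#include <stdio.h>

typedef struct Node { int key; struct Node *next; } Node;

/* Find the insert position for key: set *prev to the last node with a smaller
 * key and report whether an equal key already exists (the dup flag decides
 * overwrite-vs-insert in the caller, as in tSkipListPutImpl). */
static bool findPos(Node *head, int key, Node **prev) {
  Node *px = head;
  for (Node *p = head->next; p != NULL; p = p->next) {
    if (p->key >= key) {
      *prev = px;
      return p->key == key;   /* duplicate found at the recorded position */
    }
    px = p;
  }
  *prev = px;
  return false;
}

int main(void) {
  Node n3 = {30, NULL}, n2 = {20, &n3}, n1 = {10, &n2}, head = {0, &n1};
  Node *prev = NULL;
  bool dup = findPos(&head, 20, &prev);
  printf("dup=%d, insert after key %d\n", dup, prev->key);  /* dup=1, after 10 */
  return 0;
}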
...
@@ -592,7 +592,7 @@ SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken)
  size_t  nsize = strlen(newToken);
  int32_t size  = (int32_t)strlen(*str) - token->n + (int32_t)nsize + 1;
  int32_t bsize = (int32_t)((uint64_t)token->z - (uint64_t)src);
- SStrToken ntoken;
+ SStrToken ntoken = {0};

  *str = calloc(1, size);
...
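The `= {0}` change above zero-initializes every field of the token, so any field the following code never assigns is not read as garbage. A minimal illustration of the difference (Token is an invented stand-in for SStrToken):

#include <stdio.h>
#include <string.h>

typedef struct { char *z; unsigned n; unsigned type; } Token;

int main(void) {
  Token a;        /* automatic storage: fields hold indeterminate values    */
  Token b = {0};  /* every field zeroed: b.z == NULL, b.n == 0, b.type == 0 */

  memset(&a, 0, sizeof(a));  /* explicit equivalent of the = {0} form */
  printf("b.n=%u b.type=%u b.z=%p\n", b.n, b.type, (void *)b.z);
  return 0;
}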
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
import subprocess
from random import choice
class TwoClients:
    def initConnection(self):
        self.host = "chenhaoran01"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/chr/cfg/single/"
        self.port = 6030
        self.rowNum = 10
        self.ts = 1537146000000

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"
        walFilePath = "/var/lib/taos/mnode_bak/wal/"

        # new taos client
        conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
        print(conn1)
        cur1 = conn1.cursor()
        tdSql.init(cur1, True)

        # create background db and tb
        tdSql.execute("drop database if exists db1")
        os.system("%staosdemo -f compress/insertDataDb1.json -y " % binPath)

        # create foreground db and tb
        tdSql.execute("drop database if exists foredb")
        tdSql.execute("create database foredb")
        tdSql.execute("use foredb")
        print("123test")
        tdSql.execute("create stable if not exists stb (ts timestamp, dataInt int, dataDouble double,dataStr nchar(200)) tags(loc nchar(50),t1 int)")
        tdSql.execute("create table tb1 using stb tags('beijing1', 10)")
        tdSql.execute("insert into tb1 values(1614218412000,8635,98.861,'qazwsxedcrfvtgbyhnujmikolp1')(1614218422000,8636,98.862,'qazwsxedcrfvtgbyhnujmikolp2')")
        tdSql.execute("create table tb2 using stb tags('beijing2', 11)")
        tdSql.execute("insert into tb2 values(1614218432000,8647,98.863,'qazwsxedcrfvtgbyhnujmikolp3')")
        tdSql.execute("insert into tb2 values(1614218442000,8648,98.864,'qazwsxedcrfvtgbyhnujmikolp4')")

        # check data correctness
        tdSql.execute("use db1")
        tdSql.query("select count(tbname) from stb0")
        tdSql.checkData(0, 0, 50000)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 5000000)
        tdSql.execute("use foredb")
        tdSql.query("select count (tbname) from stb")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count (*) from stb")
        tdSql.checkData(0, 0, 4)
        tdSql.query("select * from tb1 order by ts")
        tdSql.checkData(0, 3, "qazwsxedcrfvtgbyhnujmikolp1")
        tdSql.query("select * from tb2 order by ts")
        tdSql.checkData(1, 3, "qazwsxedcrfvtgbyhnujmikolp4")

        # delete useless files
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        # os.system("rm -rf compress/%s.sql" % testcaseFilename)


clients = TwoClients()
clients.initConnection()
# clients.getBuildPath()
clients.run()
\ No newline at end of file
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 50000,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 100,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 0,
"childtable_offset":0,
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"},{"type": "TINYINT"},{"type": "smallint"},{"type": "bool"},{"type": "bigint"},{"type": "float"},{"type": "double"}, {"type": "BINARY","len": 32}, {"type": "nchar","len": 32}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}
@@ -31,23 +31,23 @@ class TDTestCase:
        tdSql.prepare()
        tdSql.execute('reset query cache')
        tdSql.execute('drop database if exists db')
-       tdSql.error('create database db keep 365000');
+       tdSql.error('create database db keep 365000')
-       tdSql.execute('create database db precision "ns";')
+       tdSql.execute('create database db precision "ns"')
-       tdSql.query('show databases;')
+       tdSql.query('show databases')
        tdSql.checkData(0,16,'ns')
        tdSql.execute('use db')

        tdLog.debug('testing nanosecond support in 1st timestamp')
        tdSql.execute('create table tb (ts timestamp, speed int)')
-       tdSql.execute('insert into tb values(\'2021-06-10 0:00:00.100000001\', 1);')
+       tdSql.execute('insert into tb values(\'2021-06-10 0:00:00.100000001\', 1)')
-       tdSql.execute('insert into tb values(1623254400150000000, 2);')
+       tdSql.execute('insert into tb values(1623254400150000000, 2)')
-       tdSql.execute('import into tb values(1623254400300000000, 3);')
+       tdSql.execute('import into tb values(1623254400300000000, 3)')
-       tdSql.execute('import into tb values(1623254400299999999, 4);')
+       tdSql.execute('import into tb values(1623254400299999999, 4)')
-       tdSql.execute('insert into tb values(1623254400300000001, 5);')
+       tdSql.execute('insert into tb values(1623254400300000001, 5)')
-       tdSql.execute('insert into tb values(1623254400999999999, 7);')
+       tdSql.execute('insert into tb values(1623254400999999999, 7)')
-       tdSql.query('select * from tb;')
+       tdSql.query('select * from tb')
        tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001')
        tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000')
        tdSql.checkData(2,0,'2021-06-10 0:00:00.299999999')
@@ -55,61 +55,61 @@ class TDTestCase:
        tdSql.checkData(4,1,5)
        tdSql.checkData(5,1,7)
        tdSql.checkRows(6)

-       tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;')
+       tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';')
+       tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\'')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;')
+       tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000')
        tdSql.checkData(0,0,1)
-       tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';')
+       tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\'')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb where ts > 1623254400400000000;')
+       tdSql.query('select count(*) from tb where ts > 1623254400400000000')
        tdSql.checkData(0,0,1)
-       tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';')
+       tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\'')
        tdSql.checkData(0,0,5)

-       tdSql.query('select count(*) from tb where ts > now + 400000000b;')
+       tdSql.query('select count(*) from tb where ts > now + 400000000b')
        tdSql.checkRows(0)

-       tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';')
+       tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\'')
        tdSql.checkData(0,0,6)

-       tdSql.query('select count(*) from tb where ts <= 1623254400300000000;')
+       tdSql.query('select count(*) from tb where ts <= 1623254400300000000')
        tdSql.checkData(0,0,4)

-       tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';')
+       tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\'')
        tdSql.checkRows(0)

-       tdSql.query('select count(*) from tb where ts = 1623254400150000000;')
+       tdSql.query('select count(*) from tb where ts = 1623254400150000000')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';')
+       tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\'')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;')
+       tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000')
        tdSql.checkData(0,0,5)

-       tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';')
+       tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\'')
        tdSql.checkData(0,0,3)

-       tdSql.query('select avg(speed) from tb interval(5000000000b);')
+       tdSql.query('select avg(speed) from tb interval(5000000000b)')
        tdSql.checkRows(1)

        tdSql.query('select avg(speed) from tb interval(100000000b)')
        tdSql.checkRows(4)

-       tdSql.error('select avg(speed) from tb interval(1b);')
+       tdSql.error('select avg(speed) from tb interval(1b)')
-       tdSql.error('select avg(speed) from tb interval(999b);')
+       tdSql.error('select avg(speed) from tb interval(999b)')

-       tdSql.query('select avg(speed) from tb interval(1000b);')
+       tdSql.query('select avg(speed) from tb interval(1000b)')
        tdSql.checkRows(5)

-       tdSql.query('select avg(speed) from tb interval(1u);')
+       tdSql.query('select avg(speed) from tb interval(1u)')
        tdSql.checkRows(5)

-       tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);')
+       tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b)')
        tdSql.checkRows(4)

        tdSql.query('select last(*) from tb')
@@ -120,20 +120,20 @@ class TDTestCase:
        tdSql.checkData(0,0, 1623254400100000001)
        tdSql.checkData(0,0, '2021-06-10 0:00:00.100000001')

-       tdSql.execute('insert into tb values(now + 500000000b, 6);')
+       tdSql.execute('insert into tb values(now + 500000000b, 6)')
-       tdSql.query('select * from tb;')
+       tdSql.query('select * from tb')
        tdSql.checkRows(7)

        tdLog.debug('testing nanosecond support in other timestamps')
-       tdSql.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);')
+       tdSql.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp)')
-       tdSql.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');')
+       tdSql.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\')')
-       tdSql.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);')
+       tdSql.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000)')
-       tdSql.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);')
+       tdSql.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000)')
-       tdSql.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);')
+       tdSql.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999)')
-       tdSql.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);')
+       tdSql.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001)')
-       tdSql.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);')
+       tdSql.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999)')

-       tdSql.query('select * from tb2;')
+       tdSql.query('select * from tb2')
        tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001')
        tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000')
        tdSql.checkData(2,1,4)
@@ -141,72 +141,72 @@ class TDTestCase:
        tdSql.checkData(4,2,'2021-06-11 00:00:00.300000001')
        tdSql.checkData(5,2,'2021-06-13 00:00:00.999999999')
        tdSql.checkRows(6)

-       tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;')
+       tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000')
        tdSql.checkData(0,0,1)
-       tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\';')
+       tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\'')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000;')
+       tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000')
        tdSql.checkData(0,0,1)
-       tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\';')
+       tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\'')
        tdSql.checkData(0,0,5)

-       tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b;')
+       tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b')
        tdSql.checkRows(0)

-       tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';')
+       tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\'')
        tdSql.checkData(0,0,6)

-       tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000;')
+       tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000')
        tdSql.checkData(0,0,5)

-       tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\';')
+       tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\'')
        tdSql.checkRows(0)

-       tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';')
+       tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\'')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001;')
+       tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001')
        tdSql.checkData(0,0,1)

-       tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;')
+       tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000')
        tdSql.checkData(0,0,5)

-       tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';')
+       tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\'')
        tdSql.checkData(0,0,3)

-       tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999;')
+       tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999')
        tdSql.checkData(0,0,5)
-       tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';')
+       tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\'')
        tdSql.checkData(0,0,5)
-       tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\';')
+       tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\'')
        tdSql.checkData(0,0,6)

-       tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999;')
+       tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999')
        tdSql.checkData(0,0,5)
-       tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';')
+       tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\'')
        tdSql.checkData(0,0,5)
-       tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\';')
+       tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\'')
        tdSql.checkData(0,0,6)

-       tdSql.execute('insert into tb2 values(now + 500000000b, 6, now +2d);')
+       tdSql.execute('insert into tb2 values(now + 500000000b, 6, now +2d)')
-       tdSql.query('select * from tb2;')
+       tdSql.query('select * from tb2')
        tdSql.checkRows(7)

        tdLog.debug('testing ill nanosecond format handling')
-       tdSql.execute('create table tb3 (ts timestamp, speed int);')
+       tdSql.execute('create table tb3 (ts timestamp, speed int)')

-       tdSql.error('insert into tb3 values(16232544001500000, 2);')
+       tdSql.error('insert into tb3 values(16232544001500000, 2)')
-       tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2);')
+       tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2)')
-       tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\';')
+       tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\'')
        tdSql.checkRows(1)

-       tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2);')
+       tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2)')
-       tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\';')
+       tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\'')
        tdSql.checkRows(1)

        os.system('sudo timedatectl set-ntp on')
...
@@ -273,6 +273,7 @@ python3 ./test.py -f query/queryStateWindow.py
# python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
python3 ./test.py -f query/nestquery_last_row.py
python3 ./test.py -f query/nestedQuery/nestedQuery.py
+python3 ./test.py -f query/nestedQuery/nestedQuery_datacheck.py
python3 ./test.py -f query/queryCnameDisplay.py
# python3 ./test.py -f query/operator_cost.py
# python3 ./test.py -f query/long_where_query.py
@@ -399,13 +400,12 @@ python3 test.py -f alter/alter_cacheLastRow.py
python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
-#python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f query/queryWildcardLength.py
python3 ./test.py -f query/queryTbnameUpperLower.py
python3 ./test.py -f query/query.py
-python3 ./test.py -f query/queryDiffColsOr.py
+python3 ./test.py -f query/queryDiffColsTagsAndOr.py
python3 ./test.py -f client/nettest.py
@@ -416,6 +416,10 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py
python3 ./test.py -f query/queryRegex.py
python3 ./test.py -f tools/taosdemoTestdatatype.py
+python3 ./test.py -f insert/schemalessInsert.py
+python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py
+python3 ./test.py -f insert/openTsdbJsonInsert.py
#======================p4-end===============
...
@@ -1581,7 +1581,7 @@ class TDTestCase:
        # self.td5798()

        # develop branch
-       self.td4889()
+       # self.td4889()  # In the scenario with vnode/wal/wal* but without meta/data in the vnode, the status is reset to 0 right now.
        self.td5798()

    def stop(self):
...
@@ -61,6 +61,12 @@ class TDTestCase:
        tdSql.query("select count(*) from stb")
        tdSql.checkData(0, 0, 4096)

+       sql = "create table stb(ts timestamp, "
+       for i in range(15):
+           sql += "col%d binary(1022), " % (i + 1)
+       sql += "col1023 binary(1015))"
+       tdSql.error(sql)
+
        endTime = time.time()

        sql = "create table stb(ts timestamp, "
...
This diff is collapsed. (8 collapsed file diffs are not shown.)