Commit bc8e356e authored by shenglian zhou

Merge remote-tracking branch 'origin/develop' into szhou/hotfix/sml-tiny-batch

...@@ -71,7 +71,6 @@ def pre_test(){
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
...@@ -146,7 +145,6 @@ def pre_test_noinstall(){
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
...@@ -218,7 +216,6 @@ def pre_test_mac(){
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
git ls-files --stage | grep 160000 | awk '{print $4}' | xargs git rm --cached
git submodule update --init --recursive
cd ${WK}
git reset --hard HEAD~10
...@@ -253,6 +250,8 @@ def pre_test_mac(){
mkdir debug
cd debug
cmake .. > /dev/null
go env -w GOPROXY=https://goproxy.cn,direct
go env -w GO111MODULE=on
cmake --build .
'''
return 1
...
...@@ -64,7 +64,7 @@ To build the [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian
sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
```
### CentOS 7:
```bash
sudo yum install epel-release
sudo yum update
...@@ -82,7 +82,7 @@ To install Apache Maven:
sudo yum install -y maven
```
### CentOS 8 & Fedora:
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git
```
...@@ -100,8 +100,9 @@ sudo dnf install -y maven
#### Install build dependencies for taos-tools
To build the [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed.
```bash
sudo yum install libz-devel xz-devel snappy-devel jansson-devel pkgconfig libatomic
```
Note: Since snappy lacks pkg-config support (see [this pull request](https://github.com/google/snappy/pull/86)), cmake prompts that libsnappy was not found. Snappy nevertheless works well.
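You can verify locally that the missing-package warning is only cosmetic; a minimal check (pkg-config's exit status is what is being tested here):
```bash
# snappy ships no snappy.pc, so pkg-config cannot see it even when installed
pkg-config --exists snappy && echo "snappy visible to pkg-config" || echo "not visible (expected)"
```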
### Setup golang environment
TDengine includes a few components developed in Go. Please refer to the official documentation at golang.org to set up the Go environment.
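If Go module downloads are slow in your region, the CI script above routes them through a proxy; a minimal sketch of the same one-time setup on a build machine (goproxy.cn is just one common mirror):
```bash
# confirm the Go toolchain is available
go version
# enable module mode and route module downloads through a proxy
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```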
...
...@@ -83,6 +83,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series big data, designed
* [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile the Windows client yourself; connectors in the Windows environment all depend on it
* [Rust Connector](/connector/rust): connect to the TDengine server from Rust via the libtaos client or the RESTful interface.
## [TDengine Components and Tools](/tools/adapter)
* [taosAdapter User Manual](/tools/adapter)
## [Connections with Other Tools](/connections)
* [Grafana](/connections#grafana): retrieve and visualize data saved in TDengine
...
...@@ -1052,7 +1052,7 @@ When the HTTP request URL uses `sqlutc`, timestamps in the returned result set are in UTC
The sample program source code is located at:
* {client_install_directory}/examples/C#
* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523)
**Note:** TDengineTest.cs is a C# sample program that shows the database connection parameters and how to perform inserts, queries, and other operations.
...@@ -1084,7 +1084,7 @@ dotnet add package TDengine.Connector
```c#
using TDengineDriver;
```
* Users can refer to [TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest) for how to define database connection parameters and perform inserts, queries, and other operations.
**Note:**
...
...@@ -25,8 +25,8 @@ IT operations monitoring data is usually time-sensitive, for example
### Grafana
Please refer to the [official document](https://grafana.com/grafana/download).
### TDengine
Download and install TDengine-server 2.3.0.0 or a later version from the TAOS Data [download](http://taosdata.com/cn/all-downloads/) page.
## Setup data chain
...
...@@ -8,7 +8,7 @@
- Data writing and query performance far exceed those of OpenTSDB;
- Efficient compression for time-series data: the on-disk footprint after compression is less than 1/5 of the original;
- Installation and deployment are very simple: a single package completes the whole process in seconds, with no dependency on any third-party software;
- The built-in functions cover all query functions supported by OpenTSDB, plus more time-series query functions, scalar functions, and aggregate functions, with advanced capabilities such as multiple time-window aggregations, join queries, expression evaluation, multiple group-by aggregations, user-defined sorting, and user-defined functions. The SQL-like syntax makes it easy to learn, with essentially no learning cost.
- Up to 128 tags are supported, with a total tag length of up to 16 KB;
- Besides HTTP, interfaces for Java, Python, C, Rust, Go, and other languages are provided, and enterprise-standard connector protocols such as JDBC are supported.
...@@ -40,7 +40,11 @@
- **Adjust the data collector configuration**
In TDengine 2.3 we released taosAdapter, a stateless component that scales quickly and elastically. It is compatible with InfluxDB's Line Protocol and OpenTSDB's telnet/JSON write protocols, provides rich data-ingestion capabilities, and effectively reduces the cost and difficulty of migrating user applications.
Users can deploy taosAdapter instances elastically as needed, quickly raising write throughput to match the scenario and guaranteeing ingestion for different application workloads.
Through taosAdapter, data collected by collectd and StatsD can be pushed directly to TDengine, making application migration seamless and effortless. taosAdapter also supports data ingestion from Telegraf, Icinga, TCollector, and node_exporter; for usage details see [taosAdapter](https://www.taosdata.com/cn/documentation/tools/adapter).
If collectd is used, modify its configuration file (default location `/etc/collectd/collectd.conf`) to point to the IP address and port of the node where taosAdapter is deployed. Assuming the taosAdapter IP address is 192.168.1.130 and the port is 6046, configure it as follows:
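The configuration block itself is collapsed in this diff; a minimal sketch of a write_tsdb section appended from the shell (the Host/Port values follow the example above, and StoreRates/AlwaysAppendDS are illustrative defaults):
```bash
# point collectd's write_tsdb plugin at the taosAdapter node
cat <<'EOF' | sudo tee -a /etc/collectd/collectd.conf
LoadPlugin write_tsdb
<Plugin write_tsdb>
        <Node>
                Host "192.168.1.130"
                Port "6046"
                StoreRates false
                AlwaysAppendDS false
        </Node>
</Plugin>
EOF
sudo systemctl restart collectd
```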
...@@ -61,24 +65,9 @@ LoadPlugin write_tsdb
- **Adjust the dashboard system**
Once data is written to TDengine normally, Grafana can be set up to visualize the data stored in TDengine. For how to obtain and use the Grafana plugin provided by TDengine, please refer to [Connections with Other Tools](https://www.taosdata.com/cn/documentation/connections#grafana).
First copy the entire dist directory under the grafanaplugin directory into Grafana's plugin directory (default location `/var/lib/grafana/plugins/`), then restart Grafana; the TDengine data source will appear under the **Add Data Source** menu.
```shell
sudo cp -r . /var/lib/grafana/plugins/tdengine
sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
# start grafana service
sudo service grafana-server restart
# or with systemd
sudo systemctl start grafana-server
```
TDengine provides two default Dashboard templates; users only need to import the templates under the Grafana directory into Grafana to activate and use them.
In addition, TDengine provides two default Dashboard templates for quickly inspecting the information saved in a TDengine database. You only need to import the templates under the Grafana directory into Grafana to activate and use them.
![](../../images/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg)
...@@ -129,8 +118,8 @@ TDengine currently supports only Grafana for dashboard visualization, so if your
| No. | metric | value name | type | tag1 | tag2 | tag3 | tag4 | tag5 |
| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ |
| 1 | memory | value | double | host | memory_type | memory_type_instance | source | n/a |
| 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a |
| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
...@@ -191,7 +180,21 @@ TDengine supports the standard JDBC 3.0 interface for database operations, and you can also use other
To facilitate historical data migration, we provide a plugin for the data synchronization tool DataX that can write data into TDengine automatically. Note that DataX's automated migration supports only the single-value data model.
For details on how to use DataX and how to write data into TDengine with it, please refer to [the DataX-based TDengine data migration tool](https://www.taosdata.com/blog/2021/10/26/3156.html).
After hands-on migration practice with DataX, we found that launching several processes to migrate multiple metrics concurrently can greatly improve the throughput of historical data migration. Some records from the migration are listed below; we hope they serve as a reference for your own migration work.
| Number of DataX instances (concurrent processes) | Migration speed (records/second) |
| ---- | -------------- |
| 1 | ~139,000 |
| 2 | ~218,000 |
| 3 | ~249,000 |
| 5 | ~295,000 |
| 10 | ~330,000 |
<br/>(Note: the test data came from a single node with an Intel(R) Core(TM) i7-10700 CPU @ 2.90GHz, 16 cores, and 64 GB of RAM; DataX's channel and batchSize were 8 and 1000 respectively, and each record contained 10 tags)
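As a rough illustration of the multi-process approach (a sketch only: the per-metric job files are hypothetical DataX JSON jobs, and the standard datax.py launcher is assumed):
```bash
# run one DataX job per metric concurrently, then wait for all of them
for job in memory.json swap.json disk.json; do
    python "$DATAX_HOME/bin/datax.py" "$job" &
done
wait  # returns once every migration process has finished
```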
### 2. Manual data migration
...
...@@ -79,6 +79,10 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
- [Rust Connector](/connector/rust): A taosc/RESTful API based TDengine client for Rust
## [Components and Tools](/tools/adapter)
* [taosAdapter](/tools/adapter)
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
...@@ -128,4 +132,4 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## FAQ
- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
...@@ -855,7 +855,7 @@ Only some configuration parameters related to RESTful interface are listed below
### Example Source Code
You can find sample code in the following locations:
* {client_install_directory}/examples/C#
* [github C# example source code](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523)
**Tips:** TDengineTest.cs is one of the C# connector's samples; it includes basic examples such as connecting and executing SQL.
...@@ -888,7 +888,7 @@ dotnet add package TDengine.Connector
```C#
using TDengineDriver;
```
* Users can refer to [TDengineTest.cs](https://github.com/taosdata/TDengine/tree/develop/tests/examples/C%2523/TDengineTest) to learn how to define a database connection and perform queries, inserts, and other basic data manipulations.
**Note:**
...
# Rapidly build an IT DevOps system with TDengine + Telegraf + Grafana
## Background
TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
There is a lot of time-series data in the IT DevOps scenario, for example:
- Metrics of system resources: CPU, memory, IO, network status, etc.
- Metrics of software systems: service status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other metrics related to the specific business.
A mainstream IT DevOps system generally includes a data-collection module, a data-persistence module, and a visualization module. Telegraf and Grafana are among the most popular data-collection and visualization modules, while the persistence module varies more, with OpenTSDB and InfluxDB being prominent choices. TDengine, as an emerging time-series data platform, offers further advantages, including high performance, high reliability, and easier management and maintenance.
Here we introduce a way to build an IT DevOps system with TDengine, Telegraf, and Grafana. Not a single line of program code is needed; only a few lines of configuration files have to be modified.
![IT-DevOps-Solutions-Telegraf.png](../../images/IT-DevOps-Solutions-Telegraf.png)
## Installation steps
### Install Telegraf, Grafana and TDengine
Please refer to each component's official document for Telegraf, Grafana, and TDengine installation.
### Telegraf
Please refer to the [official document](https://portal.influxdata.com/downloads/).
### Grafana
Please refer to the [official document](https://grafana.com/grafana/download).
### TDengine
Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official website](http://taosdata.com/en/all-downloads/).
## Setup data chain
### Download TDengine plugin to Grafana plugin's directory
```bash
wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
sudo systemctl restart grafana-server.service
```
### Modify /etc/telegraf/telegraf.conf
Please add the lines below to /etc/telegraf/telegraf.conf. Fill in the name of the database in which you want to save Telegraf's data in TDengine, and specify the correct hostname of the TDengine server/cluster, username, and password:
```
[[outputs.http]]
url = "http://<TDengine server/cluster host>:6041/influxdb/v1/write?db=<database name>"
method = "POST"
timeout = "5s"
username = "<TDengine's username>"
password = "<TDengine's password>"
data_format = "influx"
influx_max_line_bytes = 250
```
Then restart telegraf:
```
sudo systemctl restart telegraf
```
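Once Telegraf restarts you can check from the server side that rows are arriving; a minimal sketch with the taos CLI (assuming it is installed on the server, with `<database name>` as configured above):
```bash
# list the super tables that Telegraf's metrics were mapped to
taos -s "use <database name>; show stables;"
```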
### Import dashboard
Use your web browser to access IP:3000 and log in to the Grafana management interface. The default username and password are admin/admin.
Click the 'gear' icon from the left bar and select 'Plugins'. You should find the icon of the TDengine data source plugin.
Click the 'plus' icon from the left bar and select 'Import'. You can download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json and then import it into Grafana. After that, you should see an interface like:
![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png)
## Summary
We demonstrated how to build a full-featured IT DevOps system with TDengine, Telegraf, and Grafana. TDengine supports schemaless protocol data insertion from version 2.3.0.0. Based on TDengine's powerful ecosystem integration capability, a user can build a highly efficient and easy-to-maintain IT DevOps system in a few minutes. Please find more detailed documentation about TDengine's high-performance data insertion/query functions and more use cases on TAOS Data's official website.
# Rapidly build an IT DevOps system with TDengine + collectd/StatsD + Grafana
## Background
TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
There is a lot of time-series data in the IT DevOps scenario, for example:
- Metrics of system resources: CPU, memory, IO, network status, etc.
- Metrics of software systems: service status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other metrics related to the specific business.
A mainstream IT DevOps system generally includes a data-collection module, a data-persistence module, and a visualization module. Telegraf and Grafana are among the most popular data-collection and visualization modules, while the persistence module varies more, with OpenTSDB and InfluxDB being prominent choices. TDengine, as an emerging time-series data platform, offers further advantages, including high performance, high reliability, and easier management and maintenance.
Here we introduce a way to build an IT DevOps system with TDengine, collectd/StatsD, and Grafana. Not a single line of program code is needed; only a few lines of configuration files have to be modified.
![IT-DevOps-Solutions-Collectd-StatsD.png](../../images/IT-DevOps-Solutions-Collectd-StatsD.png)
## Installation steps
Please refer to each component's official document for collectd, StatsD, Grafana, and TDengine installation.
### collectd
Please refer to the [official document](https://collectd.org/documentation.shtml).
### StatsD
Please refer to the [official document](https://github.com/statsd/statsd).
### Grafana
Please refer to the [official document](https://grafana.com/grafana/download).
### TDengine
Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official website](http://taosdata.com/cn/all-downloads/).
## Setup data chain
### Download TDengine plugin to Grafana plugin's directory
```bash
wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
sudo systemctl restart grafana-server.service
```
### To configure collectd
Please add a few lines in /etc/collectd/collectd.conf as below. Please specify the correct value for hostname and the port number:
```
LoadPlugin network
<Plugin network>
Server "<TDengine cluster/server host>" "<port for collectd>"
</Plugin>
sudo systemctl start collectd
```
### To configure StatsD
Please add the following to the config.js file and then restart StatsD, using the correct hostname and port number of TDengine and taosAdapter:
```
{
  backends: ["./backends/repeater"],
  repeater: [{ host: '<TDengine server/cluster host>', port: <port for StatsD> }]
}
```
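StatsD reads the file at startup; a minimal way to launch it against the edited configuration (assuming a source checkout of statsd, with the path adjusted to yours):
```bash
# from the statsd checkout: run the daemon with the edited config
node stats.js path/to/config.js
```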
### Import dashboard
Use your web browser to access IP:3000 and log in to the Grafana management interface. The default username and password are admin/admin.
Click the gear icon from the left bar and select 'Plugins'. You should find the icon of the TDengine data source plugin.
#### Import collectd dashboard
Please download the dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json.
Click the 'plus' icon from the left bar and select 'Import'. Then you should see an interface like:
![IT-DevOps-Solutions-collectd-dashboard.png](../../images/IT-DevOps-Solutions-collectd-dashboard.png)
#### Import StatsD dashboard
Please download dashboard JSON file from https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json.
Click the 'plus' icon from the left bar and select 'Import'. Then you should see an interface like:
![IT-DevOps-Solutions-statsd-dashboard.png](../../images/IT-DevOps-Solutions-statsd-dashboard.png)
## Summary
We demonstrated how to build a full-featured IT DevOps system with TDengine, collectd, StatsD, and Grafana. TDengine supports schemaless protocol data insertion from version 2.3.0.0. Based on TDengine's powerful ecosystem integration capability, a user can build a highly efficient and easy-to-maintain IT DevOps system in a few minutes. Please find more detailed documentation about TDengine's high-performance data insertion/query functions and more use cases on TAOS Data's official website.
(This diff is collapsed.)
...@@ -846,7 +846,7 @@ vercomp () {
function is_version_compatible() {
curr_version=`ls ${script_dir}/driver/libtaos.so* | awk -F 'libtaos.so.' '{print $2}'`
if [ -f ${script_dir}/driver/vercomp.txt ]; then
min_compatible_version=`cat ${script_dir}/driver/vercomp.txt`
...
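The change above swaps a position-based cut for a split on the literal library prefix, which keeps working no matter how many dot-separated fields the version string has; a quick illustration (the file name is hypothetical):
```bash
# the field separator is the literal prefix, so $2 is everything after it
echo "driver/libtaos.so.2.3.0.0" | awk -F 'libtaos.so.' '{print $2}'   # prints: 2.3.0.0
```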
...@@ -368,19 +368,28 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) {
// validate the output field type for "UNION ALL" subclause
static int32_t normalizeVarDataTypeLength(SSqlCmd* pCmd) {
const char* msg1 = "columns in select clause not identical";
const char* msg2 = "too many select clause siblings, at most 100 allowed";
int32_t siblings = 0;
int32_t diffSize = 0;
// if there is only one element, the limit of clause is the limit of global result.
SQueryInfo* pQueryInfo1 = pCmd->pQueryInfo;
SQueryInfo* pSibling = pQueryInfo1->sibling;
// pQueryInfo1 itself
++siblings;
while(pSibling != NULL) {
int32_t ret = tscFieldInfoCompare(&pQueryInfo1->fieldsInfo, &pSibling->fieldsInfo, &diffSize);
if (ret != 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (++siblings > 100) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pSibling = pSibling->sibling;
}
...@@ -2505,6 +2514,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg13 = "parameter list required";
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
const char* msg15 = "parameter is out of range [1, 1000]";
const char* msg16 = "elapsed duration should be greater than or equal to database precision";
switch (functionId) {
case TSDB_FUNC_COUNT: {
...@@ -2599,19 +2609,21 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
case TSDB_FUNC_FLOOR:
case TSDB_FUNC_ROUND:
case TSDB_FUNC_STDDEV:
case TSDB_FUNC_LEASTSQR:
case TSDB_FUNC_ELAPSED: {
// 1. validate the number of parameters
int32_t numOfParams = (pItem->pNode->Expr.paramList == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->Expr.paramList);
// no parameters or more than one parameter for function
if (pItem->pNode->Expr.paramList == NULL ||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && functionId != TSDB_FUNC_ELAPSED && numOfParams != 1) ||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3) ||
(functionId == TSDB_FUNC_ELAPSED && numOfParams > 2)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSqlExprItem* pParamElem = taosArrayGet(pItem->pNode->Expr.paramList, 0);
if ((pParamElem->pNode->tokenId != TK_ALL && pParamElem->pNode->tokenId != TK_ID) || 0 == pParamElem->pNode->columnName.n) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
...@@ -2620,6 +2632,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// elapsed only can be applied to primary key
if (functionId == TSDB_FUNC_ELAPSED && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "elapsed only can be applied to primary key");
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
...@@ -2631,7 +2648,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// 2. check if sql function can be applied on this column data type
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
if (!IS_NUMERIC_TYPE(pSchema->type) && (functionId != TSDB_FUNC_ELAPSED)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
} else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
...@@ -2676,11 +2693,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
} else if (functionId == TSDB_FUNC_IRATE) {
int64_t prec = info.precision;
tscExprAddParams(&pExpr->base, (char*)&prec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
} else if (functionId == TSDB_FUNC_DERIVATIVE || (functionId == TSDB_FUNC_ELAPSED && 2 == numOfParams)) {
char val[8] = {0};
int64_t tickPerSec = 0;
if ((TSDB_DATA_TYPE_NULL == pParamElem[1].pNode->value.nType) || tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
...@@ -2690,23 +2707,27 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
if ((tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) && (functionId == TSDB_FUNC_DERIVATIVE)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
} else if (tickPerSec <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16);
}
tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
if (functionId == TSDB_FUNC_DERIVATIVE) {
memset(val, 0, tListLen(val));
if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
int64_t v = *(int64_t*) val;
if (v != 0 && v != 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
}
}
SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
...@@ -3125,7 +3146,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return TSDB_CODE_SUCCESS;
}
default: {
pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n);
if (pUdfInfo == NULL) {
...@@ -3496,7 +3516,7 @@ int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) {
if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) ||
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) ||
(functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) ||
(functionId == TSDB_FUNC_SAMPLE) || (functionId == TSDB_FUNC_ELAPSED)) {
if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->base.param[0].i64, &type, &bytes,
&interBytes, 0, true, NULL) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
...@@ -3551,8 +3571,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed are not allowed to apply to super table directly";
const char* msg2 = "TWA/Diff/Derivative/Irate/CSUM/MAVG/SAMPLE/INTERP/Elapsed only support group by tbname for super table query";
const char* msg3 = "functions not support for super table query";
// filter sql function not supported by metric query yet.
...@@ -3570,7 +3590,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
}
if (tscIsTWAQuery(pQueryInfo) || tscIsDiffDerivLikeQuery(pQueryInfo) || tscIsIrateQuery(pQueryInfo) ||
tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_SAMPLE) || tscGetPointInterpQuery(pQueryInfo) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
...@@ -7474,7 +7494,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
const char* msg3 = "group by/session/state_window not allowed on projection query";
const char* msg4 = "retrieve tags not compatible with group by or interval query";
const char* msg5 = "functions can not be mixed up";
const char* msg6 = "TWA/Diff/Derivative/Irate/CSum/MAvg/Elapsed only support group by tbname";
// only retrieve tags, group by is not supported
if (tscQueryTags(pQueryInfo)) {
...@@ -7536,7 +7556,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
}
if ((!pQueryInfo->stateWindow) && (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA ||
f == TSDB_FUNC_IRATE || f == TSDB_FUNC_CSUM || f == TSDB_FUNC_MAVG || f == TSDB_FUNC_ELAPSED)) {
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);
if (j == 0) {
...@@ -7585,7 +7605,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "TWA/Diff/Derivative/Irate/elapsed are not allowed to apply to super table without group by tbname";
const char* msg2 = "group by not supported in nested interp query";
const char* msg3 = "order by not supported in nested interp query";
const char* msg4 = "first column should be timestamp for interp query";
...@@ -7598,7 +7618,7 @@ int32_t validateFunctionFromUpstream(SQueryInfo* pQueryInfo, char* msg) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ELAPSED) {
for (int32_t j = 0; j < upNum; ++j) {
SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, j);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pUp, 0);
...@@ -9121,7 +9141,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg5 = "only tag query not compatible with normal column filter";
const char* msg6 = "not support stddev/percentile/interp in the outer query yet";
const char* msg7 = "derivative/twa/rate/irate/diff requires timestamp column exists in subquery";
const char* msg8 = "condition missing for join query";
const char* msg9 = "not support 3 level select";
...@@ -9206,7 +9226,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE || f == TSDB_FUNC_RATE || f == TSDB_FUNC_DIFF) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
...
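The parser changes above add the elapsed aggregate: it accepts only the primary timestamp column, takes an optional time unit no finer than the database precision, and on a super table requires group by tbname. A hypothetical usage sketch through the taos shell (database and table names are placeholders):
```bash
# per-child-table elapsed time in seconds over a super table
taos -s "SELECT ELAPSED(ts, 1s) FROM test_db.meters GROUP BY tbname;"
```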
...@@ -837,7 +837,11 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pExpr->resColId >= 0) {
tscError("result column id underflowed: %d", pExpr->resColId);
return TSDB_CODE_TSC_RES_TOO_MANY;
}
SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg);
SColIndex* pIndex = &pSqlExpr->colInfo;
...@@ -943,6 +947,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tsCompQuery = query.tsCompQuery;
pQueryMsg->simpleAgg = query.simpleAgg;
pQueryMsg->pointInterpQuery = query.pointInterpQuery;
pQueryMsg->needTableSeqScan = query.needTableSeqScan;
pQueryMsg->needReverseScan = query.needReverseScan;
pQueryMsg->stateWindow = query.stateWindow;
pQueryMsg->numOfTags = htonl(numOfTags);
...
...@@ -394,6 +394,10 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}
bool tscNeedTableSeqScan(SQueryInfo* pQueryInfo) {
return pQueryInfo->stableQuery && (tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_TWA) || tscQueryContainsFunction(pQueryInfo, TSDB_FUNC_ELAPSED));
}
bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
...@@ -410,7 +414,6 @@ bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) {
return false;
}
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) {
if (tscIsProjectionQuery(pQueryInfo)) {
return false;
...@@ -543,7 +546,7 @@ bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo) {
}
int32_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_ELAPSED) {
return true;
}
}
...@@ -5073,6 +5076,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo);
pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
pQueryAttr->needTableSeqScan = tscNeedTableSeqScan(pQueryInfo);
pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
pQueryAttr->distinct = pQueryInfo->distinct;
pQueryAttr->sw = pQueryInfo->sessionWindow;
...
...@@ -9,15 +9,11 @@ import java.sql.SQLException;
import java.sql.Statement;
/**
* this class is an extension of {@link Statement}. e.g.:
* @version v1.0.0
* @JDK: 1.8
* @description: this class is an extension of {@link Statement}. use like:
* Statement statement = conn.createStatement();
* SchemalessStatement schemalessStatement = new SchemalessStatement(statement);
* schemalessStatement.execute(sql);
* schemalessStatement.insert(lines, SchemalessProtocolType, SchemalessTimestampType);
* @since 2021-11-03 17:10
*/
public class SchemalessStatement extends AbstractStatementWrapper {
public SchemalessStatement(Statement statement) {
...@@ -27,12 +23,12 @@ public class SchemalessStatement extends AbstractStatementWrapper {
/**
* batch insert schemaless lines
*
* @param lines schemaless lines
* @param protocolType schemaless type {@link SchemalessProtocolType}
* @param timestampType Time precision {@link SchemalessTimestampType}
* @throws SQLException execute insert exception
*/
public void insert(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
Connection connection = this.getConnection();
if (connection instanceof TSDBConnection) {
TSDBConnection tsdbConnection = (TSDBConnection) connection;
...@@ -52,7 +48,7 @@ public class SchemalessStatement extends AbstractStatementWrapper {
* @param timestampType Time precision {@link SchemalessTimestampType}
* @throws SQLException execute insert exception
*/
public void insert(String line, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
insert(new String[]{line}, protocolType, timestampType);
}
}
...@@ -29,7 +29,7 @@ public class SchemalessInsertTest {
// when
try (Statement statement = conn.createStatement();
SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
schemalessStatement.insert(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
}
// then
...@@ -64,7 +64,7 @@ public class SchemalessInsertTest {
// when
try (Statement statement = conn.createStatement();
SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
schemalessStatement.insert(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
}
// then
...@@ -116,7 +116,7 @@ public class SchemalessInsertTest {
// when
try (Statement statement = conn.createStatement();
SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
schemalessStatement.insert(json, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
}
// then
...
...@@ -2,7 +2,6 @@ package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.TSDBErrorNumbers;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
...@@ -59,38 +58,31 @@ public class AuthenticationTest {
@Test
public void test() throws SQLException {
// change password
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("alter user " + user + " pass '" + password + "'");
stmt.close();
conn.close();
// use new to login and execute query
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password);
stmt = conn.createStatement();
stmt.execute("show databases");
ResultSet rs = stmt.getResultSet();
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ":" + rs.getString(i) + "\t");
}
System.out.println();
}
// change password back
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password);
stmt = conn.createStatement();
stmt.execute("alter user " + user + " pass 'taosdata'");
stmt.close();
conn.close();
}
@Before
public void before() {
try {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
}
...@@ -18,9 +18,8 @@ public class InsertDbwithoutUseDbTest {
private static final Random random = new Random(System.currentTimeMillis());
@Test
public void case001() throws SQLException {
// prepare schema
Class.forName("com.taosdata.jdbc.TSDBDriver");
String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
try (Statement stmt = conn.createStatement()) {
...@@ -51,9 +50,8 @@ public class InsertDbwithoutUseDbTest {
}
@Test
public void case002() throws SQLException {
// prepare the schema
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
final String url = "jdbc:TAOS-RS://" + host + ":6041/inWithoutDb?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
try (Statement stmt = conn.createStatement()) {
...
@@ -25,7 +25,7 @@ public class TimestampPrecisonInNanoRestTest {
    private static final String date4 = format.format(new Date(timestamp1 + 10L));
    private static final String date2 = date1 + "123455";
    private static final String date3 = date4 + "123456";
    private static Connection conn;
@@ -43,7 +43,7 @@ public class TimestampPrecisonInNanoRestTest {
        stmt.execute("drop database if exists " + ns_timestamp_db);
        stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
        stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
        stmt.close();
    }
@@ -54,7 +54,7 @@ public class TimestampPrecisonInNanoRestTest {
        stmt.execute("drop database if exists " + ns_timestamp_db);
        stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'");
        stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)");
        stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)");
        stmt.close();
    }
@@ -105,7 +105,7 @@ public class TimestampPrecisonInNanoRestTest {
    @Test
    public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'");
@@ -139,7 +139,7 @@ public class TimestampPrecisonInNanoRestTest {
    public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            long timestamp4 = timestamp1 * 1000_000 + 123123;
            stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
            checkCount(1l, rs);
            rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'");
@@ -215,7 +215,7 @@ public class TimestampPrecisonInNanoRestTest {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLargerThanInNumberTypeForFirstCol() {
@@ -279,7 +279,7 @@ public class TimestampPrecisonInNanoRestTest {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanInDateTypeForFirstCol() {
@@ -347,7 +347,7 @@ public class TimestampPrecisonInNanoRestTest {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() {
@@ -466,7 +466,7 @@ public class TimestampPrecisonInNanoRestTest {
    }

    @Test
-   public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){
+   public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)");
            ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather");
@@ -477,7 +477,7 @@ public class TimestampPrecisonInNanoRestTest {
    }

    @Test
-   public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){
+   public void canIntervalAndSlidingAcceptNsUnitForFirstCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
            rs.next();
@@ -492,7 +492,7 @@ public class TimestampPrecisonInNanoRestTest {
    }

    @Test
-   public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){
+   public void canIntervalAndSlidingAcceptNsUnitForSecondCol() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)");
            rs.next();
@@ -506,21 +506,17 @@ public class TimestampPrecisonInNanoRestTest {
        }
    }

-   @Test
-   public void testDataOutOfRangeExceptionForFirstCol() {
+   @Test(expected = SQLException.class)
+   public void testDataOutOfRangeExceptionForFirstCol() throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)");
-       } catch (SQLException e) {
-           Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
        }
    }

-   @Test
-   public void testDataOutOfRangeExceptionForSecondCol() {
+   @Test(expected = SQLException.class)
+   public void testDataOutOfRangeExceptionForSecondCol() throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)");
-       } catch (SQLException e) {
-           Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage());
        }
    }
...
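The out-of-range tests above now declare the expected failure instead of string-matching the error message, which keeps them stable if the wording changes. A minimal sketch of the JUnit 4 idiom (the directly thrown exception stands in for a real failing insert):

```java
import org.junit.Test;

import java.sql.SQLException;

public class ExpectedExceptionDemo {
    // JUnit 4 fails this test unless a SQLException escapes the method,
    // so no catch block or Assert.assertEquals on the message is required.
    @Test(expected = SQLException.class)
    public void timestampOutOfRange() throws SQLException {
        throw new SQLException("Timestamp data out of range");
    }
}
```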
@@ -373,11 +373,12 @@ public class RestfulConnectionTest {
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
-       conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/log?user=root&password=taosdata", properties);
+       conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", properties);
        // create test database for test cases
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("create database if not exists test");
        }
    }

    @AfterClass
...
@@ -9,9 +9,10 @@ import java.util.Random;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class RestfulJDBCTest {

-   private static final String host = "127.0.0.1";
-   private final Random random = new Random(System.currentTimeMillis());
-   private Connection connection;
+   // private static final String host = "127.0.0.1";
+   private static final String host = "master";
+   private static final Random random = new Random(System.currentTimeMillis());
+   private static Connection connection;

    @Test
    public void testCase001() throws SQLException {
@@ -129,15 +130,23 @@ public class RestfulJDBCTest {
        }
    }

-   @Before
-   public void before() throws SQLException {
-       connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata&httpKeepAlive=false");
+   @BeforeClass
+   public static void beforeClass() {
+       try {
+           connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+       } catch (SQLException e) {
+           e.printStackTrace();
+       }
    }

-   @After
-   public void after() throws SQLException {
-       if (connection != null)
-           connection.close();
+   @AfterClass
+   public static void afterClass() throws SQLException {
+       if (connection != null) {
+           Statement stmt = connection.createStatement();
+           stmt.execute("drop database if exists restful_test");
+           stmt.close();
+           connection.close();
+       }
    }
}
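Switching from `@Before`/`@After` to `@BeforeClass`/`@AfterClass` opens the connection once per test class instead of once per test method, which is why `connection` and `random` become `static` above. A sketch of that lifecycle with a placeholder resource:

```java
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ClassFixtureDemo {
    // Class-level hooks are static, so the shared state must be static too.
    private static StringBuilder sharedResource;

    @BeforeClass
    public static void beforeClass() {
        sharedResource = new StringBuilder("opened"); // runs once before all tests
    }

    @Test
    public void usesSharedResource() {
        sharedResource.append(", used");
    }

    @AfterClass
    public static void afterClass() {
        sharedResource = null; // runs once after all tests; drop test databases here
    }
}
```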
@@ -186,22 +186,17 @@ public class RestfulResultSetMetaDataTest {
    }

    @BeforeClass
-   public static void beforeClass() {
-       try {
-           Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
-           conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
-           stmt = conn.createStatement();
-           stmt.execute("create database if not exists restful_test");
-           stmt.execute("use restful_test");
-           stmt.execute("drop table if exists weather");
-           stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
-           stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
-           rs = stmt.executeQuery("select * from restful_test.weather");
-           rs.next();
-           meta = rs.getMetaData();
-       } catch (ClassNotFoundException | SQLException e) {
-           e.printStackTrace();
-       }
+   public static void beforeClass() throws SQLException {
+       conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+       stmt = conn.createStatement();
+       stmt.execute("create database if not exists restful_test");
+       stmt.execute("use restful_test");
+       stmt.execute("drop table if exists weather");
+       stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
+       stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
+       rs = stmt.executeQuery("select * from restful_test.weather");
+       rs.next();
+       meta = rs.getMetaData();
    }

    @AfterClass
...
@@ -17,7 +17,8 @@ import java.text.SimpleDateFormat;
public class RestfulResultSetTest {

-   private static final String host = "127.0.0.1";
+   // private static final String host = "127.0.0.1";
+   private static final String host = "master";
    private static Connection conn;
    private static Statement stmt;
    private static ResultSet rs;
@@ -658,35 +659,29 @@ public class RestfulResultSetTest {
    }

    @BeforeClass
-   public static void beforeClass() {
-       try {
-           conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
-           stmt = conn.createStatement();
-           stmt.execute("create database if not exists restful_test");
-           stmt.execute("use restful_test");
-           stmt.execute("drop table if exists weather");
-           stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
-           stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
-           rs = stmt.executeQuery("select * from restful_test.weather");
-           rs.next();
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
+   public static void beforeClass() throws SQLException {
+       conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+       stmt = conn.createStatement();
+       stmt.execute("drop database if exists restful_test");
+       stmt.execute("create database if not exists restful_test");
+       stmt.execute("use restful_test");
+       stmt.execute("drop table if exists weather");
+       stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
+       stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
+       rs = stmt.executeQuery("select * from restful_test.weather");
+       rs.next();
    }

    @AfterClass
-   public static void afterClass() {
-       try {
-           if (rs != null)
-               rs.close();
-           if (stmt != null)
-               stmt.close();
-           if (conn != null)
-               conn.close();
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
+   public static void afterClass() throws SQLException {
+       if (rs != null)
+           rs.close();
+       if (stmt != null) {
+           stmt.execute("drop database if exists restful_test");
+           stmt.close();
+       }
+       if (conn != null)
+           conn.close();
    }
}
\ No newline at end of file
@@ -572,11 +572,14 @@ public class SQLTest {
    @BeforeClass
    public static void before() throws SQLException {
-       connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
+       connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
    }

    @AfterClass
    public static void after() throws SQLException {
+       Statement stmt = connection.createStatement();
+       stmt.execute("drop database if exists restful_test");
+       stmt.close();
        connection.close();
    }
...
package com.taosdata.jdbc.rs;

-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;

import java.sql.*;
@@ -9,9 +12,8 @@ public class WasNullTest {
    private static final String host = "127.0.0.1";
    private Connection conn;

    @Test
-   public void testGetTimestamp() {
+   public void testGetTimestamp() throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop table if exists weather");
            stmt.execute("create table if not exists weather(f1 timestamp, f2 timestamp, f3 int)");
@@ -34,14 +36,11 @@ public class WasNullTest {
                }
            }
        }
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
    }

    @Test
-   public void testGetObject() {
+   public void testGetObject() throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("drop table if exists weather");
            stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
@@ -63,32 +62,25 @@ public class WasNullTest {
            }
        }
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
    }

    @Before
-   public void before() {
-       try {
-           conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
-           Statement stmt = conn.createStatement();
-           stmt.execute("drop database if exists restful_test");
-           stmt.execute("create database if not exists restful_test");
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
+   public void before() throws SQLException {
+       conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
+       try (Statement stmt = conn.createStatement()) {
+           stmt.execute("drop database if exists restful_test");
+           stmt.execute("create database if not exists restful_test");
+           stmt.execute("use restful_test");
+       }
    }

    @After
-   public void after() {
-       try {
-           Statement statement = conn.createStatement();
-           statement.execute("drop database if exists restful_test");
-           if (conn != null)
-               conn.close();
-       } catch (SQLException e) {
-           e.printStackTrace();
-       }
+   public void after() throws SQLException {
+       if (conn != null) {
+           Statement statement = conn.createStatement();
+           statement.execute("drop database if exists restful_test");
+           conn.close();
+       }
    }
}
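The rewritten `before()` wraps its `Statement` in try-with-resources so a failing setup statement cannot leak the handle. A minimal sketch of the mechanism with a generic `AutoCloseable` (the `Probe` type is illustrative only):

```java
public class TryWithResourcesDemo {
    static class Probe implements AutoCloseable {
        void use() { System.out.println("using probe"); }
        @Override public void close() { System.out.println("probe closed"); }
    }

    public static void main(String[] args) {
        // close() runs when the block exits, whether normally or via an exception.
        try (Probe p = new Probe()) {
            p.use();
        }
    }
}
```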
@@ -78,14 +78,14 @@ static SStep tsDnodeSteps[] = {
  {"dnode-vmgmt",     dnodeInitVMgmt,       dnodeCleanupVMgmt},
  {"dnode-mread",     dnodeInitMRead,       NULL},
  {"dnode-mwrite",    dnodeInitMWrite,      NULL},
  {"dnode-mpeer",     dnodeInitMPeer,       NULL},
  {"dnode-client",    dnodeInitClient,      dnodeCleanupClient},
  {"dnode-server",    dnodeInitServer,      dnodeCleanupServer},
  {"dnode-vnodes",    dnodeInitVnodes,      dnodeCleanupVnodes},
  {"dnode-modules",   dnodeInitModules,     dnodeCleanupModules},
  {"dnode-mread",     NULL,                 dnodeCleanupMRead},
  {"dnode-mwrite",    NULL,                 dnodeCleanupMWrite},
  {"dnode-mpeer",     NULL,                 dnodeCleanupMPeer},
  {"dnode-shell",     dnodeInitShell,       dnodeCleanupShell},
  {"dnode-statustmr", dnodeInitStatusTimer, dnodeCleanupStatusTimer},
  {"dnode-telemetry", dnodeInitTelemetry,   dnodeCleanupTelemetry},
@@ -121,7 +121,7 @@ static int dnodeCreateDir(const char *dir) {
  if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
    return -1;
  }
  return 0;
}
@@ -263,8 +263,8 @@ static int32_t dnodeInitStorage() {
  if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) {
    dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno));
    return -1;
  }
  if (tfsInit(tsDiskCfg, tsDiskCfgNum) < 0) {
    dError("failed to init TFS since %s", tstrerror(terrno));
    return -1;
@@ -296,7 +296,7 @@ static int32_t dnodeInitStorage() {
  if (dnodeCreateDir(tsMnodeDir) < 0) {
    dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
    return -1;
  }
  if (dnodeCreateDir(tsDnodeDir) < 0) {
    dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno));
...
@@ -127,9 +127,20 @@ int32_t dnodeInitVnodes() {
  int32_t threadNum = tsNumOfCores;
  int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
  SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread));
+ if (threads == NULL) {
+   return TSDB_CODE_DND_OUT_OF_MEMORY;
+ }
+
  for (int32_t t = 0; t < threadNum; ++t) {
    threads[t].threadIndex = t;
    threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t));
+   if (threads[t].vnodeList == NULL) {
+     dError("vnodeList allocation failed");
+     status = TSDB_CODE_DND_OUT_OF_MEMORY;
+     goto DNODE_INIT_VNODES_OUT;
+   }
  }

  for (int32_t v = 0; v < numOfVnodes; ++v) {
@@ -163,18 +174,24 @@ int32_t dnodeInitVnodes() {
    }
    openVnodes += pThread->opened;
    failedVnodes += pThread->failed;
-   free(pThread->vnodeList);
  }
- free(threads);

  dInfo("there are total vnodes:%d, opened:%d", numOfVnodes, openVnodes);
  if (failedVnodes != 0) {
    dError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes);
-   return -1;
+   status = TSDB_CODE_DND_VNODE_OPEN_FAILED;
  }

- return TSDB_CODE_SUCCESS;
+DNODE_INIT_VNODES_OUT:
+ for (int32_t t = 0; t < threadNum; ++t) {
+   SOpenVnodeThread *pThread = &threads[t];
+   free(pThread->vnodeList);
+ }
+ free(threads);
+
+ return status;
}

void dnodeCleanupVnodes() {
...
This diff is collapsed.
@@ -475,6 +475,7 @@ typedef struct {
  bool    tsCompQuery;      // is tscomp query
  bool    simpleAgg;
  bool    pointInterpQuery; // point interpolation query
+ bool    needTableSeqScan; // need scan table by table
  bool    needReverseScan;  // need reverse scan
  bool    stateWindow;      // state window flag
...
@@ -17,7 +17,7 @@
#include "taos.h"
#include "shellCommand.h"

-#define SHELL_INPUT_MAX_COMMAND_SIZE 500000
+#define SHELL_INPUT_MAX_COMMAND_SIZE 10000

extern char configDir[];
...
@@ -43,7 +43,7 @@ ELSE ()
        COMMAND git clean -f -d
        BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
        INSTALL_COMMAND
-       COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 > /dev/null && ./upx taosadapter || :
+       COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || :
        COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
        COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
        COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
...
-Subproject commit 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1
+Subproject commit 88346a2e4e2e9282d2ec8b8c5264ca1ec23698a1
@@ -79,6 +79,8 @@ extern "C" {
#define TSDB_FUNC_BLKINFO     39

+#define TSDB_FUNC_ELAPSED     40
+
///////////////////////////////////////////
// the following functions is not implemented.
// after implementation, move them before TSDB_FUNC_BLKINFO. also make TSDB_FUNC_BLKINFO the maxium function index
...
@@ -230,6 +230,7 @@ typedef struct SQueryAttr {
  bool          diffQuery;        // is diff query
  bool          simpleAgg;
  bool          pointInterpQuery; // point interpolation query
+ bool          needTableSeqScan; // need scan table by table
  bool          needReverseScan;  // need reverse scan
  bool          distinct;         // distinct query or not
  bool          stateWindow;      // window State on sub/normal table
...
@@ -196,6 +196,12 @@ typedef struct {
  char      *taglists;
} SSampleFuncInfo;

+typedef struct SElapsedInfo {
+  int8_t hasResult;
+  TSKEY  min;
+  TSKEY  max;
+} SElapsedInfo;
+
typedef struct {
  bool valueAssigned;
  union {
@@ -371,6 +377,11 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
      *bytes = sizeof(STwaInfo);
      *interBytes = *bytes;
      return TSDB_CODE_SUCCESS;
+   } else if (functionId == TSDB_FUNC_ELAPSED) {
+     *type = TSDB_DATA_TYPE_BINARY;
+     *bytes = sizeof(SElapsedInfo);
+     *interBytes = *bytes;
+     return TSDB_CODE_SUCCESS;
    }
  }
@@ -471,6 +482,10 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
    *bytes = sizeof(SStddevdstInfo);
    *interBytes = (*bytes);
+ } else if (functionId == TSDB_FUNC_ELAPSED) {
+   *type = TSDB_DATA_TYPE_DOUBLE;
+   *bytes = tDataTypes[*type].bytes;
+   *interBytes = sizeof(SElapsedInfo);
  } else {
    return TSDB_CODE_TSC_INVALID_OPERATION;
  }
@@ -480,7 +495,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
// TODO use hash table
int32_t isValidFunction(const char* name, int32_t len) {
- for(int32_t i = 0; i <= TSDB_FUNC_BLKINFO; ++i) {
+ for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) {
    int32_t nameLen = (int32_t) strlen(aAggs[i].name);
    if (len != nameLen) {
      continue;
@@ -1080,11 +1095,11 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
      if ((*retVal < pData[i]) ^ isMin) {
        *retVal = pData[i];
-       TSKEY k = tsList[i];
-       DO_UPDATE_TAG_COLUMNS(pCtx, k);
+       if (tsList) {
+         TSKEY k = tsList[i];
+         DO_UPDATE_TAG_COLUMNS(pCtx, k);
+       }
      }
      *notNullElems += 1;
    }
#if defined(_DEBUG_VIEW)
@@ -3449,7 +3464,7 @@ static void spread_function(SQLFunctionCtx *pCtx) {
  SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);

  int32_t numOfElems = 0;

  // todo : opt with pre-calculated result
  // column missing cause the hasNull to be true
  if (pCtx->preAggVals.isSet) {
@@ -3552,7 +3567,7 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
   * the type of intermediate data is binary
   */
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);

  if (pCtx->currentStage == MERGE_STAGE) {
    assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
@@ -4922,6 +4937,120 @@ static void sample_func_finalizer(SQLFunctionCtx *pCtx) {
  doFinalizer(pCtx);
}
static SElapsedInfo * getSElapsedInfo(SQLFunctionCtx *pCtx) {
if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) {
return (SElapsedInfo *)pCtx->pOutput;
} else {
return GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
}
}
static bool elapsedSetup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) {
if (!function_setup(pCtx, pResInfo)) {
return false;
}
SElapsedInfo *pInfo = getSElapsedInfo(pCtx);
pInfo->min = MAX_TS_KEY;
pInfo->max = 0;
pInfo->hasResult = 0;
return true;
}
static int32_t elapsedRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
return BLK_DATA_NO_NEEDED;
}
static void elapsedFunction(SQLFunctionCtx *pCtx) {
SElapsedInfo *pInfo = getSElapsedInfo(pCtx);
if (pCtx->preAggVals.isSet) {
if (pInfo->min == MAX_TS_KEY) {
pInfo->min = pCtx->preAggVals.statis.min;
pInfo->max = pCtx->preAggVals.statis.max;
} else {
if (pCtx->order == TSDB_ORDER_ASC) {
pInfo->max = pCtx->preAggVals.statis.max;
} else {
pInfo->min = pCtx->preAggVals.statis.min;
}
}
} else {
// 0 == pCtx->size mean this is end interpolation.
if (0 == pCtx->size) {
if (pCtx->order == TSDB_ORDER_DESC) {
if (pCtx->end.key != INT64_MIN) {
pInfo->min = pCtx->end.key;
}
} else {
if (pCtx->end.key != INT64_MIN) {
pInfo->max = pCtx->end.key + 1;
}
}
goto elapsedOver;
}
int64_t *ptsList = (int64_t *)GET_INPUT_DATA_LIST(pCtx);
// pCtx->start.key == INT64_MIN mean this is first window or there is actual start point of current window.
// pCtx->end.key == INT64_MIN mean current window does not end in current data block or there is actual end point of current window.
if (pCtx->order == TSDB_ORDER_DESC) {
if (pCtx->start.key == INT64_MIN) {
pInfo->max = (pInfo->max < ptsList[pCtx->size - 1]) ? ptsList[pCtx->size - 1] : pInfo->max;
} else {
pInfo->max = pCtx->start.key + 1;
}
if (pCtx->end.key != INT64_MIN) {
pInfo->min = pCtx->end.key;
} else {
pInfo->min = ptsList[0];
}
} else {
if (pCtx->start.key == INT64_MIN) {
pInfo->min = (pInfo->min > ptsList[0]) ? ptsList[0] : pInfo->min;
} else {
pInfo->min = pCtx->start.key;
}
if (pCtx->end.key != INT64_MIN) {
pInfo->max = pCtx->end.key + 1;
} else {
pInfo->max = ptsList[pCtx->size - 1];
}
}
}
elapsedOver:
SET_VAL(pCtx, pCtx->size, 1);
if (pCtx->size > 0) {
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
pInfo->hasResult = DATA_SET_FLAG;
}
}
static void elapsedMerge(SQLFunctionCtx *pCtx) {
SElapsedInfo *pInfo = getSElapsedInfo(pCtx);
memcpy(pInfo, pCtx->pInput, (size_t)pCtx->inputBytes);
GET_RES_INFO(pCtx)->hasResult = pInfo->hasResult;
}
static void elapsedFinalizer(SQLFunctionCtx *pCtx) {
if (GET_RES_INFO(pCtx)->hasResult != DATA_SET_FLAG) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
return;
}
SElapsedInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
*(double *)pCtx->pOutput = (double)pInfo->max - (double)pInfo->min;
if (pCtx->numOfParams > 0 && pCtx->param[0].i64 > 0) {
*(double *)pCtx->pOutput = *(double *)pCtx->pOutput / pCtx->param[0].i64;
}
GET_RES_INFO(pCtx)->numOfRes = 1;
doFinalizer(pCtx);
}
/////////////////////////////////////////////////////////////////////////////////////////////
/*
 * function compatible list.
@@ -4942,8 +5071,8 @@ int32_t functionCompatList[] = {
    1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1,
    // tid_tag, deriv, ceil, floor, round, csum, mavg, sample,
    6, 8, 1, 1, 1, -1, -1, -1,
-   // block_info
-   7
+   // block_info, elapsed
+   7, 1
};

SAggFunctionInfo aAggs[] = {{
@@ -5426,4 +5555,16 @@ SAggFunctionInfo aAggs[] = {{
    block_func_merge,
    dataBlockRequired,
  },
{
// 40
"elapsed",
TSDB_FUNC_ELAPSED,
TSDB_FUNC_ELAPSED,
TSDB_BASE_FUNC_SO,
elapsedSetup,
elapsedFunction,
elapsedFinalizer,
elapsedMerge,
elapsedRequired,
}
};
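Taken together, these hunks register a new aggregate `elapsed`: `SElapsedInfo` carries the min/max timestamps as the binary intermediate result, `elapsedMerge` combines partials across vnodes, and the finalizer emits `max - min` as a double, optionally divided by a time-unit parameter. Assuming the SQL surface matches the Python tests added later in this commit, client usage might look like this sketch (database and table names follow those tests; the `1m` unit literal is illustrative):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ElapsedDemo {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // elapsed(ts) reports the time span covered by the rows;
            // the optional unit argument (here 1m) rescales the double result.
            ResultSet rs = stmt.executeQuery("select elapsed(ts, 1m) from wxy_db.t1");
            while (rs.next()) {
                System.out.println("elapsed minutes: " + rs.getDouble(1));
            }
        }
    }
}
```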
@@ -933,9 +933,10 @@ void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t
static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset,
                             int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) {
  SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
- bool hasAggregates = pCtx[0].preAggVals.isSet;

  for (int32_t k = 0; k < numOfOutput; ++k) {
+   bool hasAggregates = pCtx[k].preAggVals.isSet;
    pCtx[k].size = forwardStep;
    pCtx[k].startTs = pWin->skey;
@@ -1258,7 +1259,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
  for (int32_t k = 0; k < pOperator->numOfOutput; ++k) {
    int32_t functionId = pCtx[k].functionId;
-   if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP) {
+   if (functionId != TSDB_FUNC_TWA && functionId != TSDB_FUNC_INTERP && functionId != TSDB_FUNC_ELAPSED) {
      pCtx[k].start.key = INT64_MIN;
      continue;
    }
@@ -1301,7 +1302,7 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
        pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
      }
    }
- } else if (functionId == TSDB_FUNC_TWA) {
+ } else if (functionId == TSDB_FUNC_TWA || functionId == TSDB_FUNC_ELAPSED) {
    assert(curTs != windowKey);

    if (prevRowIndex == -1) {
@@ -1468,7 +1469,6 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
  STimeWindow win = getActiveTimeWindow(pResultRowInfo, ts, pQueryAttr);
  bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);

  SResultRow* pResult = NULL;
  int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
                                        numOfOutput, pInfo->rowCellInfoOffset);
@@ -1491,23 +1491,22 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
      continue;
    }

    STimeWindow w = pRes->win;
    ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult,
                                  tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
    if (ret != TSDB_CODE_SUCCESS) {
      longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
    }

    assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP));

    doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1,
-                             tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP);
+                             tsCols[startPos], startPos, QUERY_IS_ASC_QUERY(pQueryAttr) ? w.ekey : w.skey, RESULT_ROW_END_INTERP);
    setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
    setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP);

    doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput);
  }

  // restore current time window
  ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx,
@@ -1821,7 +1820,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde
  pCtx->hasNull = hasNull(pColIndex, pStatis);

  // set the statistics data for primary time stamp column
- if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+ if ((pCtx->functionId == TSDB_FUNC_SPREAD || pCtx->functionId == TSDB_FUNC_ELAPSED) && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
    pCtx->preAggVals.isSet = true;
    pCtx->preAggVals.statis.min = pSDataBlock->info.window.skey;
    pCtx->preAggVals.statis.max = pSDataBlock->info.window.ekey;
@@ -6203,7 +6202,17 @@ group_finished_exit:
  return true;
}
static void resetInterpolation(SQLFunctionCtx *pCtx, SQueryRuntimeEnv* pRuntimeEnv, int32_t numOfOutput) {
if (!pRuntimeEnv->pQueryAttr->timeWindowInterpo) {
return;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].start.key = INT64_MIN;
pCtx[i].end.key = INT64_MIN;
}
*(TSKEY *)pRuntimeEnv->prevRow[0] = INT64_MIN;
}
static void doTimeEveryImpl(SOperatorInfo* pOperator, SQLFunctionCtx *pCtx, SSDataBlock* pBlock, bool newgroup) {
  STimeEveryOperatorInfo* pEveryInfo = (STimeEveryOperatorInfo*) pOperator->info;
@@ -6431,6 +6440,7 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
  SOperatorInfo* upstream = pOperator->upstream[0];
STableId prevId = {0, 0};
  while(1) {
    publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
    SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
@@ -6440,6 +6450,12 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
      break;
    }
if (prevId.tid != pBlock->info.tid || prevId.uid != pBlock->info.uid) {
resetInterpolation(pIntervalInfo->pCtx, pRuntimeEnv, pOperator->numOfOutput);
prevId.uid = pBlock->info.uid;
prevId.tid = pBlock->info.tid;
}
    // the pDataBlock are always the same one, no need to call this again
    STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
@@ -8785,6 +8801,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
  pQueryAttr->tsCompQuery      = pQueryMsg->tsCompQuery;
  pQueryAttr->simpleAgg        = pQueryMsg->simpleAgg;
  pQueryAttr->pointInterpQuery = pQueryMsg->pointInterpQuery;
+ pQueryAttr->needTableSeqScan = pQueryMsg->needTableSeqScan;
  pQueryAttr->needReverseScan  = pQueryMsg->needReverseScan;
  pQueryAttr->stateWindow      = pQueryMsg->stateWindow;
  pQueryAttr->vgId             = vgId;
...
@@ -538,7 +538,7 @@ SArray* createTableScanPlan(SQueryAttr* pQueryAttr) {
  } else {
    if (pQueryAttr->queryBlockDist) {
      op = OP_TableBlockInfoScan;
-   } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery) {
+   } else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery || pQueryAttr->needTableSeqScan) {
      op = OP_TableSeqScan;
    } else if (pQueryAttr->needReverseScan || pQueryAttr->pointInterpQuery) {
      op = OP_DataBlocksOptScan;
...
@@ -16,7 +16,7 @@
#include "os.h"

#define TAOS_ERROR_C

typedef struct {
    int32_t val;
    const char* str;
@@ -122,6 +122,7 @@
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE,       "Value out of range")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE,    "Invalid line protocol type")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE,   "Invalid timestamp precision type")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY,             "Result set too large to be output")

// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED,        "Message not processed")
@@ -232,6 +233,7 @@
TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS,       "Action in progress")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES,          "Too many vnode directories")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING,                  "Dnode is exiting")
+TAOS_DEFINE_ERROR(TSDB_CODE_DND_VNODE_OPEN_FAILED,        "Vnode open failed")

// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS,       "Action in progress")
...
@@ -371,6 +371,7 @@ python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f functions/function_ceil.py
python3 ./test.py -f functions/function_floor.py
python3 ./test.py -f functions/function_round.py
+python3 ./test.py -f functions/function_elapsed.py
python3 ./test.py -f functions/function_mavg.py
python3 ./test.py -f functions/function_csum.py
...
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from functions.function_elapsed_case import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def genTime(self, no):
        h = int(no / 60)
        hs = "%d" % h
        if h < 10:
            hs = "0%d" % h
        m = int(no % 60)
        ms = "%d" % m
        if m < 10:
            ms = "0%d" % m
        return hs, ms

    def general(self):
        # normal table
        tdSql.execute("create database wxy_db minrows 10 maxrows 200")
        tdSql.execute("use wxy_db")
        tdSql.execute("create table t1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)")
        for i in range(1, 1001):
            hs, ms = self.genTime(i)
            if i < 500:
                ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 1)" % (hs, ms, i))
            else:
                ret = tdSql.execute("insert into t1(ts, i, b) values (\"2021-11-22 %s:%s:00\", %d, 0)" % (hs, ms, i))
        tdSql.query("select count(*) from t1")
        tdSql.checkEqual(int(tdSql.getData(0, 0)), 1000)

        # empty normal table
        tdSql.execute("create table t2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp)")

        tdSql.execute("create database wxy_db_ns precision \"ns\"")
        tdSql.execute("use wxy_db_ns")
        tdSql.execute("create table t1 (ts timestamp, f float)")
        tdSql.execute("insert into t1 values('2021-11-18 00:00:00.000000100', 1)"
                      "('2021-11-18 00:00:00.000000200', 2)"
                      "('2021-11-18 00:00:00.000000300', 3)"
                      "('2021-11-18 00:00:00.000000500', 4)")

        # super table
        tdSql.execute("use wxy_db")
        tdSql.execute("create stable st1(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)")
        tdSql.execute("create table st1s1 using st1 tags(1)")
        tdSql.execute("create table st1s2 using st1 tags(2)")
        for i in range(1, 1001):
            hs, ms = self.genTime(i)
            if 0 == i % 2:
                ret = tdSql.execute("insert into st1s1(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i))
            else:
                ret = tdSql.execute("insert into st1s2(ts, i) values (\"2021-11-22 %s:%s:00\", %d)" % (hs, ms, i))
        tdSql.query("select count(*) from st1s1")
        tdSql.checkEqual(int(tdSql.getData(0, 0)), 500)
        tdSql.query("select count(*) from st1s2")
        tdSql.checkEqual(int(tdSql.getData(0, 0)), 500)

        # empty super table
        tdSql.execute("create stable st2(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)")
        tdSql.execute("create table st2s1 using st1 tags(1)")
        tdSql.execute("create table st2s2 using st1 tags(2)")

        tdSql.execute("create stable st3(ts timestamp, i int, b bigint, f float, d double, bin binary(10), s smallint, t tinyint, bl bool, n nchar(10), ts1 timestamp) tags(id int)")

    def run(self):
        tdSql.prepare()
        self.general()
        ElapsedCase().run()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
This diff is collapsed.
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from functions.function_elapsed_case import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.prepare()
        ElapsedCase(True).run()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -103,10 +103,27 @@ class TDTestCase:
                        select count(*) as count, loc from st where ts between 1600000000000 and 1600000000010 group by loc''')
        tdSql.checkRows(6)

+       # https://jira.taosdata.com:18080/browse/TS-715
+       tdLog.info("test case for TS-715")
+       sql = ""
+       tdSql.execute("create table st2(ts timestamp, c1 int, c2 int, c3 int) tags(loc nchar(20))")
+
+       for i in range(101):
+           if i == 0:
+               sql = "select last(*) from sub0 "
+           else:
+               sql += f"union all select last(*) from sub{i} "
+           tdSql.execute("create table sub%d using st2 tags('nchar%d')" % (i, i))
+           tdSql.execute("insert into sub%d values(%d, %d, %d, %d)" % (i, self.ts + i, i, i, i))
+
+       tdSql.error(sql)
+
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
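The TS-715 case above chains 101 `last(*)` subqueries with `union all` and expects the statement to be rejected, which lines up with the new `TSDB_CODE_TSC_RES_TOO_MANY` ("Result set too large to be output") error defined earlier in this commit. A sketch of how a JDBC client would observe that rejection (schema as in the Python test; the database name and exact error text are assumptions):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class UnionAllLimitDemo {
    public static void main(String[] args) throws SQLException {
        // Build the same 101-way union the regression test uses.
        StringBuilder sql = new StringBuilder("select last(*) from sub0 ");
        for (int i = 1; i <= 100; i++) {
            sql.append("union all select last(*) from sub").append(i).append(' ');
        }
        String url = "jdbc:TAOS://127.0.0.1:6030/db?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeQuery(sql.toString());
            System.out.println("unexpected: oversized union was accepted");
        } catch (SQLException e) {
            // Expected: the server refuses a union that exceeds its output limit.
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}
```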
@@ -419,7 +419,7 @@ if $data03 != @20-09-15 00:00:00.000@ then
  return -1
endi

-sql select diff(val) from (select c1 val from nest_tb0);
+sql select diff(val) from (select ts,c1 val from nest_tb0);
if $rows != 9999 then
  return -1
endi
...