diff --git a/Jenkinsfile2 b/Jenkinsfile2
index ed12a0628bb0469522ac96a340c9cbd91c6ce776..754617f99f71fa90380e42fa2b9b5f3248620d7c 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -265,6 +265,7 @@ def pre_test_build_win() {
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
+ python.exe -m pip install --upgrade pip
python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
@@ -283,6 +284,7 @@ def run_win_test() {
bat '''
echo "windows test ..."
cd %WIN_CONNECTOR_ROOT%
+ python.exe -m pip install --upgrade pip
python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
diff --git a/README-CN.md b/README-CN.md
index 7261102187ef5a587e051d7007e1cdab06c6b330..dc9bce481f1307265a380d26bb71c9353ec93386 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -14,7 +14,6 @@
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
-[](https://snapcraft.io/tdengine)
简体中文 | [English](README.md) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
@@ -36,11 +35,13 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series
# 文档
-关于完整的使用手册,系统架构和更多细节,请参考 [TDengine 文档](https://docs.taosdata.com) 或者 [English Version](https://docs.tdengine.com)。
+关于完整的使用手册,系统架构和更多细节,请参考 [TDengine 文档](https://docs.taosdata.com) 或者 [English Documents](https://docs.tdengine.com)。
# 构建
-TDengine 目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、macOS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过源码或者[安装包](https://docs.taosdata.com/get-started/package/)来安装。本快速指南仅适用于通过源码安装。
+TDengine 目前可以在 Linux、 Windows 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。
+
+用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
## 安装工具
@@ -50,20 +51,6 @@ TDengine 目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```
-编译或打包 JDBC 驱动源码,需安装 Java JDK 8 或以上版本和 Apache Maven 2.7 或以上版本。
-
-安装 OpenJDK 8:
-
-```bash
-sudo apt-get install -y openjdk-8-jdk
-```
-
-安装 Apache Maven:
-
-```bash
-sudo apt-get install -y maven
-```
-
#### 为 taos-tools 安装编译需要的软件
taosTools 是用于 TDengine 的辅助工具软件集合。目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。
@@ -79,19 +66,10 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
### CentOS 7.9:
```bash
-sudo yum install -y gcc gcc-c++ make cmake git openssl-devel
-```
-
-安装 OpenJDK 8:
-
-```bash
-sudo yum install -y java-1.8.0-openjdk
-```
-
-安装 Apache Maven:
-
-```bash
-sudo yum install -y maven
+sudo yum install epel-release
+sudo yum update
+sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
+sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8 & Fedora
@@ -100,29 +78,29 @@ sudo yum install -y maven
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
-安装 OpenJDK 8:
+#### 在 CentOS 上构建 taosTools 安装依赖软件
+
+#### For CentOS 7/RHEL
-```bash
-sudo dnf install -y java-1.8.0-openjdk
+```
+sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
```
-安装 Apache Maven:
+#### For CentOS 8/Rocky Linux
-```bash
-sudo dnf install -y maven
+```
+sudo yum install -y epel-release
+sudo yum install -y dnf-plugins-core
+sudo yum config-manager --set-enabled powertools
+sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
```
-#### 在 CentOS 上构建 taosTools 安装依赖软件
-
-为了在 CentOS 上构建 [taosTools](https://github.com/taosdata/taos-tools) 需要安装如下依赖软件
+注意:由于 snappy 缺乏 pkg-config 支持(参考 [链接](https://github.com/google/snappy/pull/86)),会导致 cmake 提示无法发现 libsnappy,实际上工作正常。
-```bash
-sudo yum install zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
+若 powertools 安装失败,可以尝试改用:
+```
+sudo yum config-manager --set-enabled Powertools
```
-
-注意:由于 snappy 缺乏 pkg-config 支持
-(参考 [链接](https://github.com/google/snappy/pull/86)),会导致
-cmake 提示无法发现 libsnappy,实际上工作正常。
### 设置 golang 开发环境
@@ -135,6 +113,12 @@ go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
+默认情况下,内嵌的 http 服务仍然可以从 TDengine 源码构建。当然您也可以使用以下命令选择构建 taosAdapter 作为 RESTful 接口的服务。
+
+```
+cmake .. -DBUILD_HTTP=false
+```
+
### 设置 rust 开发环境
TDengine 包含数个使用 Rust 语言开发的组件. 请参考 rust-lang.org 官方文档设置 rust 开发环境。
@@ -275,24 +259,6 @@ nmake install
sudo make install
```
-安装成功后,如果想以服务形式启动,先配置 `.plist` 文件,在终端中执行:
-
-```bash
-sudo cp ../packaging/macOS/com.taosdata.tdengine.plist /Library/LaunchDaemons
-```
-
-在终端中启动 TDengine 服务:
-
-```bash
-sudo launchctl load /Library/LaunchDaemons/com.taosdata.tdengine.plist
-```
-
-在终端中停止 TDengine 服务:
-
-```bash
-sudo launchctl unload /Library/LaunchDaemons/com.taosdata.tdengine.plist
-```
-
## 快速运行
如果不希望以服务方式运行 TDengine,也可以在终端中直接运行它。也即在生成完成后,执行以下命令(在 Windows 下,生成的可执行文件会带有 .exe 后缀,例如会名为 taosd.exe ):
@@ -342,21 +308,6 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
- [C#](https://docs.taosdata.com/reference/connector/csharp/)
- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
-## 第三方连接器
-
-TDengine 社区生态中也有一些非常友好的第三方连接器,可以通过以下链接访问它们的源码。
-
-- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
-- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
-- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/examples/lua)
-- [PHP](https://www.taosdata.com/en/documentation/connector#c-cpp)
-
-# 运行和添加测试例
-
-TDengine 的测试框架和所有测试例全部开源。
-
-点击 [这里](https://github.com/taosdata/TDengine/blob/develop/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
-
# 成为社区贡献者
点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
@@ -364,7 +315,3 @@ TDengine 的测试框架和所有测试例全部开源。
# 加入技术交流群
TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小 T 为好友,即可入群。
-
-# [谁在使用 TDengine](https://github.com/taosdata/TDengine/issues/2432)
-
-欢迎所有 TDengine 用户及贡献者在 [这里](https://github.com/taosdata/TDengine/issues/2432) 分享您在当前工作中开发/使用 TDengine 的故事。
diff --git a/README.md b/README.md
index 7206e7156be07072c9dda7e413b536748f565c75..31f8dcd2779cd3f68183d2bb61cd042a1718261f 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde
# What is TDengine?
-TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. Below are the most outstanding advantages of TDengine:
+TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other TSDBs with the following advantages:
- High-Performance: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
@@ -28,7 +28,7 @@ TDengine is an open source, cloud native time-series database optimized for Inte
- Cloud Native: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine can be deployed on public, private or hybrid clouds.
-- Open Source: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.7k stars on GitHub, an active developer community, and over 137k running instances worldwide.
+- Open Source: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub, an active developer community, and over 137k running instances worldwide.
- Ease of Use: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
@@ -52,20 +52,6 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```
-To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
-
-To install openjdk-8:
-
-```bash
-sudo apt-get install -y openjdk-8-jdk
-```
-
-To install Apache Maven:
-
-```bash
-sudo apt-get install -y maven
-```
-
#### Install build dependencies for taosTools
We provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. From TDengine 2.4.0.0, taosBenchmark and taosdump were not released together with TDengine.
@@ -86,36 +72,12 @@ sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
-To install openjdk-8:
-
-```bash
-sudo yum install -y java-1.8.0-openjdk
-```
-
-To install Apache Maven:
-
-```bash
-sudo yum install -y maven
-```
-
### CentOS 8 & Fedora
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
-To install openjdk-8:
-
-```bash
-sudo dnf install -y java-1.8.0-openjdk
-```
-
-To install Apache Maven:
-
-```bash
-sudo dnf install -y maven
-```
-
#### Install build dependencies for taosTools on CentOS
To build the [taosTools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed.
@@ -124,11 +86,11 @@ To build the [taosTools](https://github.com/taosdata/taos-tools) on CentOS, the
sudo yum install zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
```
-Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it lead a cmake prompt libsnappy not found. But snappy will works well.
+Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads to a cmake prompt that libsnappy is not found. But snappy works well.
### Setup golang environment
-TDengine includes few components developed by Go language. Please refer to golang.org official documentation for golang environment setup.
+TDengine includes a few components developed in the Go language. Please refer to golang.org official documentation for golang environment setup.
Please use version 1.14+. For the user in China, we recommend using a proxy to accelerate package downloading.
@@ -139,7 +101,7 @@ go env -w GOPROXY=https://goproxy.cn,direct
### Setup rust environment
-TDengine includees few compoments developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup.
+TDengine includes a few components developed in the Rust language. Please refer to rust-lang.org official documentation for rust environment setup.
## Get the source codes
@@ -304,24 +266,6 @@ After building successfully, TDengine can be installed by:
sudo make install
```
-To start the service after installation, config `.plist` file first, in a terminal, use:
-
-```bash
-sudo cp ../packaging/macOS/com.taosdata.tdengine.plist /Library/LaunchDaemons
-```
-
-To start the service, in a terminal, use:
-
-```bash
-sudo launchctl load /Library/LaunchDaemons/com.taosdata.tdengine.plist
-```
-
-To stop the service, in a terminal, use:
-
-```bash
-sudo launchctl unload /Library/LaunchDaemons/com.taosdata.tdengine.plist
-```
-
## Quick Run
If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
@@ -371,15 +315,6 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
- [C#](https://docs.taosdata.com/reference/connector/csharp/)
- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
-## Third Party Connectors
-
-The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them.
-
-- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
-- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
-- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
-- [PHP](https://www.taosdata.com/en/documentation/connector#c-cpp)
-
# How to run the test cases and how to add a new test case
TDengine's test framework and all test cases are fully open source.
diff --git a/cmake/cmake.install b/cmake/cmake.install
index 07773d1015f91dd39d7ad83c2c681857bc4777b9..4e3d0b166aba447cf48fb664f429a885caece953 100644
--- a/cmake/cmake.install
+++ b/cmake/cmake.install
@@ -1,4 +1,8 @@
-IF (TD_LINUX)
+IF (EXISTS /var/lib/taos/dnode/dnodeCfg.json)
+ INSTALL(CODE "MESSAGE(\"The default data directory /var/lib/taos contains old data of tdengine 2.x, please clear it before installing!\")")
+ELSEIF (EXISTS C:/TDengine/data/dnode/dnodeCfg.json)
+ INSTALL(CODE "MESSAGE(\"The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!\")")
+ELSEIF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index e6ae38cb82087a02f093b7d42199b10fc086e491..989f90a150e8669738e1ce9f7795631c1dab30a5 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 79bf23d
+ GIT_TAG 43924b8
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
index be46ebf0c97a29b57c1b57eb8ea5c9394f85b93a..41109937053c31f0a141fcc90016397863152e57 100644
--- a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -55,9 +55,6 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
-
-
-
diff --git a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 2061961e4255fd487fcd5eae3d0c6b78d49afccd..81e1169489d4188e14f4c5338ca322041bba80fb 100644
--- a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -46,9 +46,6 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te
-
-
-
diff --git a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
index a8f3423787ba1ce89df5a046e6e55656f57139d8..aad94c3d913a128b344757162c231affc6a64651 100644
--- a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -63,9 +63,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
-
-
-
diff --git a/docs/en/07-develop/03-insert-data/_rust_line.mdx b/docs/en/07-develop/03-insert-data/_rust_line.mdx
index dbb35d76bc3517463902b642ce4a3861ae42b2f8..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/en/07-develop/03-insert-data/_rust_line.mdx
+++ b/docs/en/07-develop/03-insert-data/_rust_line.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx b/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx
index cc2055510bce006491ed277a8e884b9958a5a993..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx
+++ b/docs/en/07-develop/03-insert-data/_rust_opts_json.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx
index 109c0c5d019e250b87e12c535e4f55c69924b4af..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/_rust_opts_telnet.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}}
```
diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/rust.mdx
index 56ca586c7e8ada6e4422596906e01887d4726fd0..ab06f72069e29361a033f724308d950afe6e8d42 100644
--- a/docs/en/14-reference/03-connector/rust.mdx
+++ b/docs/en/14-reference/03-connector/rust.mdx
@@ -10,16 +10,14 @@ import TabItem from '@theme/TabItem';
import Preparation from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
-import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
-import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
-import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
+import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
-`libtaos` is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data.
+[`taos`][taos] is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data.
-`libtaos` provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is **REST connection**, which connects to TDengine instances via taosAdapter's REST interface.
+Rust connector provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is **Websocket connection**, which connects to TDengine instances via taosAdapter service.
-The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs).
+The source code is hosted on [taosdata/taos-connector-rust](https://github.com/taosdata/taos-connector-rust).
## Supported platforms
@@ -30,241 +28,333 @@ REST connections are supported on all platforms that can run Rust.
Please refer to [version support list](/reference/connector#version-support).
-The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
+The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
## Installation
### Pre-installation
+
* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
-### Adding libtaos dependencies
+### Add dependencies
-Add the [libtaos][libtaos] dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
+Add the dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
-
-
+
+
-Add [libtaos][libtaos] to the `Cargo.toml` file.
+Add [taos] to the `Cargo.toml` file.
```toml
[dependencies]
# use default feature
-libtaos = "*"
+taos = "*"
```
-
+
-Add [libtaos][libtaos] to the `Cargo.toml` file and enable the `rest` feature.
+Add [taos] to the `Cargo.toml` file.
```toml
[dependencies]
-# use rest feature
-libtaos = { version = "*", features = ["rest"]}
+taos = { version = "*", default-features = false, features = ["native"] }
```
-
-
+
-### Using connection pools
-
-Please enable the `r2d2` feature in `Cargo.toml`.
+Add [taos] to the `Cargo.toml` file and enable the `ws` feature.
```toml
[dependencies]
-# with taosc
-libtaos = { version = "*", features = ["r2d2"] }
-# or rest
-libtaos = { version = "*", features = ["rest", "r2d2"] }
+taos = { version = "*", default-features = false, features = ["ws"] }
```
+
+
+
## Create a connection
-The [TaosCfgBuilder] provides the user with an API in the form of a constructor for the subsequent creation of connections or use of connection pools.
+In the Rust connector, we use a DSN connection string as a connection builder. For example,
```rust
-let cfg: TaosCfg = TaosCfgBuilder::default()
- .ip("127.0.0.1")
- .user("root")
- .pass("taosdata")
- .db("log") // do not set if not require a default database.
- .port(6030u16)
- .build()
- .expect("TaosCfg builder error");
-}
+let builder = TaosBuilder::from_dsn("taos://")?;
```
-You can now use this object to create the connection.
+You can now use the connection client to create the connection.
```rust
-let conn = cfg.connect()? ;
+let conn = builder.build()?;
```
The connection object can create more than one.
```rust
-let conn = cfg.connect()? ;
-let conn2 = cfg.connect()? ;
+let conn1 = builder.build()?;
+let conn2 = builder.build()?;
+```
+
+DSN is short for **D**ata **S**ource **N**ame string - [a data structure used to describe a connection to a data source](https://en.wikipedia.org/wiki/Data_source_name).
+
+A common DSN is basically constructed as this:
+
+```text
+<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
+|------|------------|---|-----------|-----------|------|------|------------|-----------------------|
+|driver| protocol | | username | password | host | port | database | params |
```
-You can use connection pools in applications.
+- **Driver**: the main entrypoint to a processor. **Required**. In Rust connector, the supported driver names are listed here:
+ - **taos**: the legacy TDengine connection data source.
+ - **tmq**: subscription data source from TDengine.
+ - **http/ws**: use websocket protocol via `ws://` scheme.
+ - **https/wss**: use websocket protocol via `wss://` scheme.
+- **Protocol**: the additional information appended to driver, which can be used to support different kinds of data sources. By default, leave it empty for the native driver (only under feature "native"), and `ws/wss` for the websocket driver (only under feature "ws"). **Optional**.
+- **Username**: as its definition, is the username to the connection. **Optional**.
+- **Password**: the password of the username. **Optional**.
+- **Host**: address host to the datasource. **Optional**.
+- **Port**: address port to the datasource. **Optional**.
+- **Database**: database name or collection name in the datasource. **Optional**.
+- **Params**: a key-value map for any other information about the datasource. **Optional**.
+
+Here is a simple DSN connection string example:
+
+```text
+taos+ws://localhost:6041/test
+```
+
+which means connecting to `localhost` on port `6041` via the `ws` protocol, with `test` as the default database.
+
+So that you can use DSN to specify connection protocol at runtime:
```rust
-let pool = r2d2::Pool::builder()
- .max_size(10000) // max connections
- .build(cfg)? ;
+use taos::*; // use it like a `prelude` mod, we need some traits at next.
-// ...
-// Use pool to get connection
-let conn = pool.get()? ;
+// use native protocol.
+let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
+let conn1 = builder.build();
+
+// use websocket protocol.
+let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
```
-After that, you can perform the following operations on the database.
+After connecting, you can perform the following operations on the database.
```rust
-async fn demo() -> Result<(), Error> {
- // get connection ...
-
- // create database
- conn.exec("create database if not exists demo").await?
- // change database context
- conn.exec("use demo").await?
- // create table
- conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?
- // insert
- conn.exec("insert into tb1 values(now, 1)").await?
- // query
- let rows = conn.query("select * from tb1").await?
- for row in rows.rows {
- println!("{}", row.into_iter().join(","));
+async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
+ // prepare database
+ taos.exec_many([
+ format!("DROP DATABASE IF EXISTS `{db}`"),
+ format!("CREATE DATABASE `{db}`"),
+ format!("USE `{db}`"),
+ ])
+ .await?;
+
+ let inserted = taos.exec_many([
+ // create super table
+ "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
+ TAGS (`groupid` INT, `location` BINARY(16))",
+ // create child table
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ // insert into child table
+ "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
+ // insert with NULL values
+ "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
+ // insert and automatically create table with tags if not exists
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ // insert many records in a single sql
+ "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
+ ]).await?;
+
+ assert_eq!(inserted, 6);
+ let mut result = taos.query("select * from `meters`").await?;
+
+ for field in result.fields() {
+ println!("got field: {}", field.name());
}
+
+ let values = result.
}
```
-## Usage examples
-
-### Write data
+Rust connector provides two kinds of ways to fetch data:
-#### SQL Write
+```rust
+ // Query option 1, use rows stream.
+ let mut rows = result.rows();
+ while let Some(row) = rows.try_next().await? {
+ for (name, value) in row {
+ println!("got value of {}: {}", name, value);
+ }
+ }
-
+ // Query options 2, use deserialization with serde.
+ #[derive(Debug, serde::Deserialize)]
+ #[allow(dead_code)]
+ struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime,
+ // float to f32
+ current: Option,
+ // int to i32
+ voltage: Option,
+ phase: Option,
+ groupid: i32,
+ // binary/varchar to String
+ location: String,
+ }
-#### InfluxDB line protocol write
+ let records: Vec = taos
+ .query("select * from `meters`")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
-
+ dbg!(records);
+ Ok(())
+```
-#### OpenTSDB Telnet line protocol write
+## Usage examples
-
+### Write data
-#### OpenTSDB JSON line protocol write
+#### SQL Write
-
+
-### Query data
+#### Stmt bind
-
+
-### More sample programs
+### Query data
-| Program Path | Program Description |
-| -------------- | ----------------------------------------------------------------------------- |
-| [demo.rs] | Basic API Usage Examples |
-| [bailongma-rs] | Using TDengine as the Prometheus remote storage API adapter for the storage backend, using the r2d2 connection pool |
+|
## API Reference
-### Connection constructor API
-
-The [Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) constructor pattern is Rust's solution for handling complex data types or optional configuration types. The [libtaos] implementation uses the connection constructor [TaosCfgBuilder] as the entry point for the TDengine Rust connector. The [TaosCfgBuilder] provides optional configuration of servers, ports, databases, usernames, passwords, etc.
-
-Using the `default()` method, you can construct a [TaosCfg] with default parameters for subsequent connections to the database or establishing connection pools.
+### Connector builder
-```rust
-let cfg = TaosCfgBuilder::default().build()? ;
-```
-
-Using the constructor pattern, the user can set on-demand.
+Use DSN to directly construct a TaosBuilder object.
```rust
-let cfg = TaosCfgBuilder::default()
- .ip("127.0.0.1")
- .user("root")
- .pass("taosdata")
- .db("log")
- .port(6030u16)
- .build()? ;
+let builder = TaosBuilder::from_dsn("")? ;
```
-Create TDengine connection using [TaosCfg] object.
+Use `builder` to create many connections:
```rust
-let conn: Taos = cfg.connect();
+let conn: Taos = cfg.build();
```
-### Connection pooling
+### Connection pool
-In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2].
+In complex applications, we recommend enabling connection pools. Connection pool for [taos] is implemented using [r2d2] by enabling "r2d2" feature.
-As follows, a connection pool with default parameters can be generated.
+Basically, a connection pool with default parameters can be generated as:
```rust
-let pool = r2d2::Pool::new(cfg)? ;
+let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
```
-You can set the same connection pool parameters using the connection pool's constructor.
+You can set the connection pool parameters using the `PoolBuilder`.
```rust
- use std::time::Duration;
- let pool = r2d2::Pool::builder()
- .max_size(5000) // max connections
- .max_lifetime(Some(Duration::from_minutes(100))) // lifetime of each connection
- .min_idle(Some(1000)) // minimal idle connections
- .connection_timeout(Duration::from_minutes(2))
- .build(cfg);
+let dsn = "taos://localhost:6030";
+
+let opts = PoolBuilder::new()
+ .max_size(5000) // max connections
+ .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
+ .min_idle(Some(1000)) // minimal idle connections
+ .connection_timeout(Duration::from_secs(2));
+
+let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
```
-In the application code, use `pool.get()? ` to get a connection object [Taos].
+In the application code, use `pool.get()?` to get a connection object [Taos].
```rust
let taos = pool.get()? ;
```
-The [Taos] structure is the connection manager in [libtaos] and provides two main APIs.
+### Connection methods
-1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc.
+The [Taos] connection struct provides several APIs for convenient use.
+
+1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT` etc. and return affected rows (only meaningful to `INSERT`).
+
+ ```rust
+ let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?;
+ ```
+
+2. `exec_many`: You can execute many SQL statements in order with the `exec_many` method.
```rust
- taos.exec().await?
+ taos.exec_many([
+ "CREATE DATABASE test",
+ "USE test",
+ "CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)",
+ ]).await?;
```
-2. `query`: Execute the query statement and return the [TaosQueryData] object.
+3. `query`: Execute the query statement and return the [ResultSet] object.
```rust
- let q = taos.query("select * from log.logs").await?
+ let mut q = taos.query("select * from log.logs").await?
```
- The [TaosQueryData] object stores the query result data and basic information about the returned columns (column name, type, length).
+ The [ResultSet] object stores the query result data and basic information about the returned columns (column name, type, length).
- Column information is stored using [ColumnMeta].
+ Get field information with the `fields` method.
```rust
- let cols = &q.column_meta;
+ let cols = q.fields();
for col in cols {
- println!("name: {}, type: {:?} , bytes: {}", col.name, col.type_, col.bytes);
+ println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes());
}
```
- It fetches data line by line.
+ Users could fetch data by rows.
```rust
- for (i, row) in q.rows.iter().enumerate() {
- for (j, cell) in row.iter().enumerate() {
- println!("cell({}, {}) data: {}", i, j, cell);
+ let mut rows = q.rows();
+ let mut nrows = 0;
+ while let Some(row) = rows.try_next().await? {
+ for (col, (name, value)) in row.enumerate() {
+ println!(
+ "[{}] got value in col {} (named `{:>8}`): {}",
+ nrows, col, name, value
+ );
}
+ nrows += 1;
+ }
+ ```
+
+ Or use it with [serde](https://serde.rs) deserialization.
+
+ ```rust
+ #[derive(Debug, Deserialize)]
+ struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime<Local>,
+ // float to f32
+ current: Option<f32>,
+ // int to i32
+ voltage: Option<i32>,
+ phase: Option<f32>,
+ groupid: i32,
+ // binary/varchar to String
+ location: String,
}
+
+ let records: Vec<Record> = taos
+ .query("select * from `meters`")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
```
Note that Rust asynchronous functions and an asynchronous runtime are required.
@@ -275,110 +365,152 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: Executes the `USE` statement.
-In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage.
-
-### Bind Interface
+### Bind API
-Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
+Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command with the [Taos] object.
```rust
-let mut stmt: Stmt = taos.stmt("insert into ? values(? ,?)") ? ;
+let mut stmt = Stmt::init(&taos).await?;
+stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
```
The bind object provides a set of interfaces for implementing parameter binding.
-##### `.set_tbname(tbname: impl ToCString)`
+#### `.set_tbname(name)`
To bind table names.
-##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
+```rust
+let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
+stmt.set_tbname("d0")?;
+```
+
+#### `.set_tags(&[tag])`
-Bind sub-table table names and tag values when the SQL statement uses a super table.
+Bind tag values when the SQL statement uses a super table.
```rust
-let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)") ? ;
-// tags can be created with any supported type, here is an example using JSON
-let v = Field::Json(serde_json::from_str("{\"tag1\":\"one, two, three, four, five, six, seven, eight, nine, ten\"}").unwrap());
-stmt.set_tbname_tags("tb0", [&tag])? ;
+let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)")?;
+stmt.set_tbname("d0")?;
+stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
-##### `.bind(params: impl IntoParams)`
+#### `.bind(&[column])`
-Bind value types. Use the [Field] structure to construct the desired type and bind.
+Bind value types. Use the [ColumnView] structure to construct the desired type and bind.
```rust
-let ts = Field::Timestamp(Timestamp::now());
-let value = Field::Float(0.0);
-stmt.bind(vec![ts, value].iter())? ;
+let params = vec![
+ ColumnView::from_millis_timestamp(vec![164000000000]),
+ ColumnView::from_bools(vec![true]),
+ ColumnView::from_tiny_ints(vec![i8::MAX]),
+ ColumnView::from_small_ints(vec![i16::MAX]),
+ ColumnView::from_ints(vec![i32::MAX]),
+ ColumnView::from_big_ints(vec![i64::MAX]),
+ ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
+ ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
+ ColumnView::from_unsigned_ints(vec![u32::MAX]),
+ ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
+ ColumnView::from_floats(vec![f32::MAX]),
+ ColumnView::from_doubles(vec![f64::MAX]),
+ ColumnView::from_varchar(vec!["ABC"]),
+ ColumnView::from_nchar(vec!["涛思数据"]),
+];
+let rows = stmt.bind(¶ms)?.add_batch()?.execute()?;
```
-##### `.execute()`
+#### `.execute()`
-Execute SQL.[Stmt] objects can be reused, re-binded, and executed after execution.
+Execute to insert all bind records. [Stmt] objects can be reused, re-bound, and executed again after execution. Remember to call `add_batch` before `execute`.
```rust
-stmt.execute()? ;
+stmt.add_batch()?.execute()?;
// next bind cycle.
// stmt.set_tbname()? ;
//stmt.bind()? ;
-//stmt.execute()? ;
+//stmt.add_batch().execute()? ;
```
-### Line protocol interface
+A runnable example for bind can be found [here](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
-The line protocol interface supports multiple modes and different precision and requires the introduction of constants in the schemaless module to set.
+### Subscription API
+
+Users can subscribe to a [TOPIC](../../../taos-sql/tmq/) with the TMQ (TDengine Message Queue) API.
+
+Start from a TMQ builder:
```rust
-use libtaos::*;
-use libtaos::schemaless::*;
+let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
-- InfluxDB line protocol
+Build a consumer:
- ```rust
- let lines = [
- "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000"
- "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000"
- ];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)? ;
- ```
+```rust
+let mut consumer = tmq.build()?;
+```
-- OpenTSDB Telnet Protocol
+Subscribe a topic:
- ```rust
- let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)? ;
- ```
+```rust
+consumer.subscribe(["tmq_meters"]).await?;
+```
-- OpenTSDB JSON protocol
+Consume messages, and commit the offset for each message.
- ```rust
- let lines = [r#"
- {
- "metric": "st",
- "timestamp": 1626006833,
- "value": 10,
- "tags": {
- "t1": true,
- "t2": false,
- "t3": 10,
- "t4": "123_abc_.! @#$%^&*:;,. /? |+-=()[]{}<>"
+```rust
+{
+ let mut stream = consumer.stream();
+
+ while let Some((offset, message)) = stream.try_next().await? {
+ // get information from offset
+
+ // the topic
+ let topic = offset.topic();
+ // the vgroup id, like partition id in kafka.
+ let vgroup_id = offset.vgroup_id();
+ println!("* in vgroup id {vgroup_id} of topic {topic}\n");
+
+ if let Some(data) = message.into_data() {
+ while let Some(block) = data.fetch_raw_block().await? {
+ // one block for one table, get table name if needed
+ let name = block.table_name();
+ let records: Vec<Record> = block.deserialize().try_collect()?;
+ println!(
+ "** table: {}, got {} records: {:#?}\n",
+ name.unwrap(),
+ records.len(),
+ records
+ );
}
- }"#];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)? ;
- ```
+ }
+ consumer.commit(offset).await?;
+ }
+}
+```
+
+Unsubscribe:
+
+```rust
+consumer.unsubscribe().await;
+```
+
+In the TMQ DSN, you must subscribe with a group id. Also, there are several options that can be set:
+
+- `group.id`: **Required**, a group id is any visible string you set.
+- `client.id`: an optional client description string.
+- `auto.offset.reset`: choose to subscribe from *earliest* or *latest*, default is *none* which means 'earliest'.
+- `enable.auto.commit`: automatically commit with a specified time interval. By default — in the recommended way — you must use `commit` to ensure that you've consumed the messages correctly; otherwise, consumers will receive repeated messages when they re-subscribe.
+- `auto.commit.interval.ms`: the auto commit interval in milliseconds.
+
+Check the whole subscription example at [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
-Please move to the Rust documentation hosting page for other related structure API usage instructions: .
+Please move to the Rust documentation hosting page for other related structure API usage instructions: <https://docs.rs/taos>.
-[libtaos]: https://github.com/taosdata/libtaos-rs
-[tdengine]: https://github.com/taosdata/TDengine
-[bailongma-rs]: https://github.com/taosdata/bailongma-rs
+[TDengine]: https://github.com/taosdata/TDengine
[r2d2]: https://crates.io/crates/r2d2
-[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
-[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
-[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
-[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
-[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
-[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
-[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
+[Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html
+[ResultSet]: https://docs.rs/taos/latest/taos/struct.ResultSet.html
+[Value]: https://docs.rs/taos/latest/taos/enum.Value.html
+[Stmt]: https://docs.rs/taos/latest/taos/stmt/struct.Stmt.html
+[taos]: https://crates.io/crates/taos
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd..8b6a26ae52af42e339e2f5a8d0824a9e1be3f386 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -3,7 +3,7 @@ title: Schemaless Writing
description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
---
-In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
@@ -39,10 +39,10 @@ In the schemaless writing data line protocol, each data item in the field_set ne
| -------- | -------- | ------------ | -------------- |
| 1 | none or f64 | double | 8 |
| 2 | f32 | float | 4 |
-| 3 | i8 | TinyInt | 1 |
-| 4 | i16 | SmallInt | 2 |
-| 5 | i32 | Int | 4 |
-| 6 | i64 or i | Bigint | 8 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
@@ -72,11 +72,11 @@ If the subtable obtained by the parse line protocol does not exist, Schemaless c
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
-7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value.
-8. Errors encountered throughout the processing will interrupt the writing process and return an error code.
+7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
+8. In order to improve the efficiency of writing, it is assumed by default that the field order of data rows for the same supertable is the same (the first data row contains all fields, and the following rows follow this order). If the order is different, the parameter smlDataFormat needs to be configured to false; otherwise, the data will be written in the assumed order and the data in the database will be abnormal.
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition
diff --git a/docs/examples/rust/Cargo.toml b/docs/examples/rust/Cargo.toml
index 114407e69edcb94fdeef77e6ae581569c5451160..136d09ffbbbd9c7bc1b876e7bfc630dea0560382 100644
--- a/docs/examples/rust/Cargo.toml
+++ b/docs/examples/rust/Cargo.toml
@@ -1,2 +1,2 @@
[workspace]
-members = ["restexample", "nativeexample", "schemalessexample"]
+members = ["restexample", "nativeexample"]
diff --git a/docs/examples/rust/nativeexample/Cargo.toml b/docs/examples/rust/nativeexample/Cargo.toml
index 64fd10a3e915a39c321b56b6f38be51417d8d18e..cdf739d35774df37781ad6ea75bfb8214b21e6ea 100644
--- a/docs/examples/rust/nativeexample/Cargo.toml
+++ b/docs/examples/rust/nativeexample/Cargo.toml
@@ -5,6 +5,9 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-libtaos = { version = "0.4.3" }
-tokio = { version = "*", features = ["rt", "macros", "rt-multi-thread"] }
-bstr = { version = "*" }
+anyhow = "1"
+chrono = "0.4"
+serde = { version = "1", features = ["derive"] }
+tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
+
+taos = { version = "0.*" }
diff --git a/docs/examples/rust/nativeexample/examples/connect.rs b/docs/examples/rust/nativeexample/examples/connect.rs
index 8e27458de5d428479668a0e5133ca904cf27c213..fb226d8710bb72abf3993663031fd9ea1a08b25b 100644
--- a/docs/examples/rust/nativeexample/examples/connect.rs
+++ b/docs/examples/rust/nativeexample/examples/connect.rs
@@ -1,19 +1,9 @@
-use libtaos::*;
+use taos::*;
-fn taos_connect() -> Result {
- TaosCfgBuilder::default()
- .ip("localhost")
- .user("root")
- .pass("taosdata")
- // .db("log") // remove comment if you want to connect to database log by default.
- .port(6030u16)
- .build()
- .expect("TaosCfg builder error")
- .connect()
-}
-
-fn main() {
+#[tokio::main]
+async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
- let taos = taos_connect().unwrap();
- println!("Connected")
+ let taos = TaosBuilder::from_dsn("taos://")?.build()?;
+ println!("Connected");
+ Ok(())
}
diff --git a/docs/examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs
index 190f8c1ef6d50a8e9c925178c1a9d31c22e3d4df..26084746f20a3662383b417eb98016f09ad0913e 100644
--- a/docs/examples/rust/nativeexample/examples/stmt_example.rs
+++ b/docs/examples/rust/nativeexample/examples/stmt_example.rs
@@ -1,38 +1,40 @@
-use bstr::BString;
-use libtaos::*;
+use taos::*;
#[tokio::main]
-async fn main() -> Result<(), Error> {
- let taos = TaosCfg::default().connect().expect("fail to connect");
+async fn main() -> anyhow::Result<()> {
+ let taos = TaosBuilder::from_dsn("taos://")?.build()?;
taos.create_database("power").await?;
taos.use_database("power").await?;
- taos.exec("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
- let mut stmt = taos.stmt("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
+ taos.exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
+
+ let mut stmt = Stmt::init(&taos)?;
+ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
// bind table name and tags
stmt.set_tbname_tags(
"d1001",
- [
- Field::Binary(BString::from("California.SanFrancisco")),
- Field::Int(2),
- ],
+ &[Value::VarChar("San Fransico".into()), Value::Int(2)],
)?;
// bind values.
let values = vec![
- Field::Timestamp(Timestamp::new(1648432611249, TimestampPrecision::Milli)),
- Field::Float(10.3),
- Field::Int(219),
- Field::Float(0.31),
+ ColumnView::from_millis_timestamp(vec![1648432611249]),
+ ColumnView::from_floats(vec![10.3]),
+ ColumnView::from_ints(vec![219]),
+ ColumnView::from_floats(vec![0.31]),
];
stmt.bind(&values)?;
// bind one more row
let values2 = vec![
- Field::Timestamp(Timestamp::new(1648432611749, TimestampPrecision::Milli)),
- Field::Float(12.6),
- Field::Int(218),
- Field::Float(0.33),
+ ColumnView::from_millis_timestamp(vec![1648432611749]),
+ ColumnView::from_floats(vec![12.6]),
+ ColumnView::from_ints(vec![218]),
+ ColumnView::from_floats(vec![0.33]),
];
stmt.bind(&values2)?;
- // execute
- stmt.execute()?;
+
+ stmt.add_batch()?;
+
+ // execute.
+ let rows = stmt.execute()?;
+ assert_eq!(rows, 2);
Ok(())
}
diff --git a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
index 4febff9be7bd5771db449fbfb184a9f208e61d8a..7e0a347948fc8450dead0babbbdd1eace2f06d1e 100644
--- a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
+++ b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
@@ -1,3 +1,101 @@
-fn main() {
-
-}
\ No newline at end of file
+use std::time::Duration;
+
+use chrono::{DateTime, Local};
+use taos::*;
+
+// Query options 2, use deserialization with serde.
+#[derive(Debug, serde::Deserialize)]
+#[allow(dead_code)]
+struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime<Local>,
+ // float to f32
+ current: Option<f32>,
+ // int to i32
+ voltage: Option<i32>,
+ phase: Option<f32>,
+}
+
+async fn prepare(taos: Taos) -> anyhow::Result<()> {
+ let inserted = taos.exec_many([
+ // create child table
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ // insert into child table
+ "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
+ // insert with NULL values
+ "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
+ // insert and automatically create table with tags if not exists
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ // insert many records in a single sql
+ "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
+ ]).await?;
+ assert_eq!(inserted, 6);
+ Ok(())
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+ let dsn = "taos://localhost:6030";
+ let builder = TaosBuilder::from_dsn(dsn)?;
+
+ let taos = builder.build()?;
+ let db = "tmq";
+
+ // prepare database
+ taos.exec_many([
+ format!("DROP TOPIC IF EXISTS tmq_meters"),
+ format!("DROP DATABASE IF EXISTS `{db}`"),
+ format!("CREATE DATABASE `{db}`"),
+ format!("USE `{db}`"),
+ // create super table
+ format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"),
+ // create topic for subscription
+ format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
+ ])
+ .await?;
+
+ let task = tokio::spawn(prepare(taos));
+
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ // subscribe
+ let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
+
+ let mut consumer = tmq.build()?;
+ consumer.subscribe(["tmq_meters"]).await?;
+
+ {
+ let mut stream = consumer.stream();
+
+ while let Some((offset, message)) = stream.try_next().await? {
+ // get information from offset
+
+ // the topic
+ let topic = offset.topic();
+ // the vgroup id, like partition id in kafka.
+ let vgroup_id = offset.vgroup_id();
+ println!("* in vgroup id {vgroup_id} of topic {topic}\n");
+
+ if let Some(data) = message.into_data() {
+ while let Some(block) = data.fetch_raw_block().await? {
+ // one block for one table, get table name if needed
+ let name = block.table_name();
+ let records: Vec<Record> = block.deserialize().try_collect()?;
+ println!(
+ "** table: {}, got {} records: {:#?}\n",
+ name.unwrap(),
+ records.len(),
+ records
+ );
+ }
+ }
+ consumer.commit(offset).await?;
+ }
+ }
+
+ consumer.unsubscribe().await;
+
+ task.await??;
+
+ Ok(())
+}
diff --git a/docs/examples/rust/restexample/Cargo.toml b/docs/examples/rust/restexample/Cargo.toml
index a5f89f8a3be3baabd298a70947f6c5d3df088aae..5fffe215d439a84c9b0da86f6b0f5607abd20bcd 100644
--- a/docs/examples/rust/restexample/Cargo.toml
+++ b/docs/examples/rust/restexample/Cargo.toml
@@ -4,5 +4,9 @@ version = "0.1.0"
edition = "2021"
[dependencies]
-libtaos = { version = "0.4.3", features = ["rest"] }
-tokio = { version = "*", features = ["rt", "macros", "rt-multi-thread"] }
+anyhow = "1"
+chrono = "0.4"
+serde = { version = "1", features = ["derive"] }
+tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
+
+taos = { version = "0.*" }
diff --git a/docs/examples/rust/restexample/examples/connect.rs b/docs/examples/rust/restexample/examples/connect.rs
index b3718342c4a142786572a7eec46d6fa36f651566..fb226d8710bb72abf3993663031fd9ea1a08b25b 100644
--- a/docs/examples/rust/restexample/examples/connect.rs
+++ b/docs/examples/rust/restexample/examples/connect.rs
@@ -1,20 +1,9 @@
-use libtaos::*;
-
-fn taos_connect() -> Result {
- TaosCfgBuilder::default()
- .ip("localhost")
- .user("root")
- .pass("taosdata")
- // .db("log") // remove comment if you want to connect to database log by default.
- .port(6030u16)
- .build()
- .expect("TaosCfg builder error")
- .connect()
-}
+use taos::*;
#[tokio::main]
-async fn main() {
+async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
- let taos = taos_connect().expect("connect error");
- println!("Connected")
+ let taos = TaosBuilder::from_dsn("taos://")?.build()?;
+ println!("Connected");
+ Ok(())
}
diff --git a/docs/examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs
index 9261536f627c297fc707708f88f57eed647dbf3e..27b2bb4788615810d097b88f0dd616b96885538c 100644
--- a/docs/examples/rust/restexample/examples/insert_example.rs
+++ b/docs/examples/rust/restexample/examples/insert_example.rs
@@ -1,18 +1,29 @@
-use libtaos::*;
+use taos::*;
#[tokio::main]
-async fn main() -> Result<(), Error> {
- let taos = TaosCfg::default().connect().expect("fail to connect");
- taos.create_database("power").await?;
- taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
- let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
- let result = taos.query(sql).await?;
- println!("{:?}", result);
+async fn main() -> anyhow::Result<()> {
+ let dsn = "ws://";
+ let taos = TaosBuilder::from_dsn(dsn)?.build()?;
+
+
+ taos.exec_many([
+ "DROP DATABASE IF EXISTS power",
+ "CREATE DATABASE power",
+ "USE power",
+ "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
+ ]).await?;
+
+ let inserted = taos.exec("INSERT INTO
+ power.d1001 USING power.meters TAGS('San Francisco', 2)
+ VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
+ ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
+ power.d1002 USING power.meters TAGS('San Francisco', 3)
+ VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
+ power.d1003 USING power.meters TAGS('Los Angeles', 2)
+ VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
+ power.d1004 USING power.meters TAGS('Los Angeles', 3)
+ VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
+
+ assert_eq!(inserted, 8);
Ok(())
}
-
-// output:
-// TaosQueryData { column_meta: [ColumnMeta { name: "affected_rows", type_: Int, bytes: 4 }], rows: [[Int(8)]] }
diff --git a/docs/examples/rust/restexample/examples/query_example.rs b/docs/examples/rust/restexample/examples/query_example.rs
index bbe0cfaabf0d3b078606e823dd504be4153832e7..1a11401bba869169203851c083b69ffaea68e200 100644
--- a/docs/examples/rust/restexample/examples/query_example.rs
+++ b/docs/examples/rust/restexample/examples/query_example.rs
@@ -1,39 +1,25 @@
-use libtaos::*;
+use taos::sync::*;
-fn taos_connect() -> Result {
- TaosCfgBuilder::default()
- .ip("localhost")
- .user("root")
- .pass("taosdata")
- .db("power")
- .port(6030u16)
- .build()
- .expect("TaosCfg builder error")
- .connect()
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Error> {
- let taos = taos_connect().expect("connect error");
- let result = taos.query("SELECT ts, current FROM meters LIMIT 2").await?;
+fn main() -> anyhow::Result<()> {
+ let taos = TaosBuilder::from_dsn("ws:///power")?.build()?;
+ let mut result = taos.query("SELECT ts, current FROM meters LIMIT 2")?;
// print column names
- let meta: Vec = result.column_meta;
- for column in meta {
- print!("{}\t", column.name)
- }
- println!();
+ let meta = result.fields();
+ println!("{}", meta.iter().map(|field| field.name()).join("\t"));
+
// print rows
- let rows: Vec> = result.rows;
+ let rows = result.rows();
for row in rows {
- for field in row {
- print!("{}\t", field);
+ let row = row?;
+ for (_name, value) in row {
+ print!("{}\t", value);
}
println!();
}
Ok(())
}
-// output:
+// output(suppose you are in +8 timezone):
// ts current
-// 2022-03-28 09:56:51.249 10.3
-// 2022-03-28 09:56:51.749 12.6
+// 2018-10-03T14:38:05+08:00 10.3
+// 2018-10-03T14:38:15+08:00 12.6
diff --git a/docs/examples/rust/schemalessexample/Cargo.toml b/docs/examples/rust/schemalessexample/Cargo.toml
deleted file mode 100644
index 32c6a122318d16f488210da54f1600ba8f6a8b7c..0000000000000000000000000000000000000000
--- a/docs/examples/rust/schemalessexample/Cargo.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[package]
-name = "schemalessexample"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-libtaos = { version = "0.4.3" }
diff --git a/docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs
deleted file mode 100644
index 64d1a3c9ac6037c16e3e1c3be0258e19cce632a0..0000000000000000000000000000000000000000
--- a/docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use libtaos::schemaless::*;
-use libtaos::*;
-
-fn main() {
- let taos = TaosCfg::default().connect().expect("fail to connect");
- taos.raw_query("CREATE DATABASE test").unwrap();
- taos.raw_query("USE test").unwrap();
- let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
- "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
- "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
- "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
- let affected_rows = taos
- .schemaless_insert(
- &lines,
- TSDB_SML_LINE_PROTOCOL,
- TSDB_SML_TIMESTAMP_MILLISECONDS,
- )
- .unwrap();
- println!("affected_rows={}", affected_rows);
-}
-
-// run with: cargo run --example influxdb_line_example
diff --git a/docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs
deleted file mode 100644
index e61691596704c8aaf979081429802df6e5aa86f9..0000000000000000000000000000000000000000
--- a/docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use libtaos::schemaless::*;
-use libtaos::*;
-
-fn main() {
- let taos = TaosCfg::default().connect().expect("fail to connect");
- taos.raw_query("CREATE DATABASE test").unwrap();
- taos.raw_query("USE test").unwrap();
- let lines = [
- r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
- {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
- {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#,
- ];
-
- let affected_rows = taos
- .schemaless_insert(
- &lines,
- TSDB_SML_JSON_PROTOCOL,
- TSDB_SML_TIMESTAMP_NOT_CONFIGURED,
- )
- .unwrap();
- println!("affected_rows={}", affected_rows); // affected_rows=4
-}
-
-// run with: cargo run --example opentsdb_json_example
diff --git a/docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs
deleted file mode 100644
index c8cab7655a24806e5c7659af80e83da383539c55..0000000000000000000000000000000000000000
--- a/docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use libtaos::schemaless::*;
-use libtaos::*;
-
-fn main() {
- let taos = TaosCfg::default().connect().expect("fail to connect");
- taos.raw_query("CREATE DATABASE test").unwrap();
- taos.raw_query("USE test").unwrap();
- let lines = [
- "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
- "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
- "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
- "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
- "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
- "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
- "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
- "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
- ];
- let affected_rows = taos
- .schemaless_insert(
- &lines,
- TSDB_SML_TELNET_PROTOCOL,
- TSDB_SML_TIMESTAMP_NOT_CONFIGURED,
- )
- .unwrap();
- println!("affected_rows={}", affected_rows); // affected_rows=8
-}
-
-// run with: cargo run --example opentsdb_telnet_example
diff --git a/docs/examples/rust/schemalessexample/src/main.rs b/docs/examples/rust/schemalessexample/src/main.rs
deleted file mode 100644
index e7a11a969c037e00a796aafeff6258501ec15e9a..0000000000000000000000000000000000000000
--- a/docs/examples/rust/schemalessexample/src/main.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-fn main() {
- println!("Hello, world!");
-}
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 771b3f020c8e00c778ea43ad8dfd8d0e870abf84..64a7d419e1cdf9a0e2617ad65c07be5c29fb7b62 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -10,7 +10,7 @@ TDengine 充分利用了时序数据的特点,提出了“一个数据采集
如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[集群管理](./cluster)一章。
+我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index ea548570d5cf2806e87970dd4ed009ad2baee871..e3ab7459c0e05304b2635f24694fb0ffde6a1bf1 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -3,7 +3,7 @@ title: 产品简介
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/subscribe)、[流式计算](../develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
@@ -11,21 +11,22 @@ TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-se
TDengine的主要功能如下:
-1. 高速数据写入,除 [SQL 写入](/develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](/reference/schemaless/),支持 [InfluxDB LINE 协议](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json)等协议写入;
-2. 第三方数据采集工具 [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQ](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
-3. 支持[各种查询](/develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
-4. 支持[用户自定义函数](/develop/udf)
-5. 支持[缓存](/develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
-6. 支持[流式计算](/develop/continuous-query)(Stream Processing)
-7. 支持[数据订阅](/develop/subscribe),而且可以指定过滤条件
-8. 支持[集群](/cluster/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
-9. 提供[命令行程序](/reference/taos-shell),便于管理集群,检查系统状态,做即席查询
-10. 提供多种数据的[导入](/operation/import)、[导出](/operation/export)
-11. 支持对[TDengine 集群本身的监控](/operation/monitor)
-12. 提供 [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) 等多种编程语言的[连接器](/reference/connector/)
-13. 支持 [REST 接口](/reference/rest-api/)
-14. 支持与[ Grafana 无缝集成](/third-party/grafana)
+1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入;
+2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
+3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
+4. 支持[用户自定义函数](../develop/udf)
+5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
+6. 支持[流式计算](../develop/stream)(Stream Processing)
+7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件
+8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
+9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
+11. 支持对[TDengine 集群本身的监控](../operation/monitor)
+12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/)
+13. 支持 [REST 接口](../reference/rest-api/)
+14. 支持与[ Grafana 无缝集成](../third-party/grafana)
15. 支持与 Google Data Studio 无缝集成
+16. 支持 [Kubernetes 部署](../deployment/k8s)
更多细小的功能,请阅读整个文档。
diff --git a/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
index 54f02c91475bb5524e259a0aa890363603a86fba..f88284ad676edaae1ff9424ae7a7dfe93aaebba2 100644
--- a/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -54,9 +54,6 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
-
-
-
diff --git a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 1cc402c3c0ca42d0d8b97d28256afbbfe5299707..4f63e17635a713f1f91785cc0fced89fe9340a95 100644
--- a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -46,9 +46,6 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
-
-
-
diff --git a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
index 09cb698fba1662303d1a4f7744b9c28eae5b2e51..b0257b9cb71ad7aafbadd29d8b6d574e4e024796 100644
--- a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -63,9 +63,6 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
-
-
-
diff --git a/docs/zh/07-develop/03-insert-data/_rust_line.mdx b/docs/zh/07-develop/03-insert-data/_rust_line.mdx
index dbb35d76bc3517463902b642ce4a3861ae42b2f8..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/zh/07-develop/03-insert-data/_rust_line.mdx
+++ b/docs/zh/07-develop/03-insert-data/_rust_line.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx
index cc2055510bce006491ed277a8e884b9958a5a993..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx
+++ b/docs/zh/07-develop/03-insert-data/_rust_opts_json.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx
index 109c0c5d019e250b87e12c535e4f55c69924b4af..25d322f8a76de6da95969f86498910871cf3d5d6 100644
--- a/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx
+++ b/docs/zh/07-develop/03-insert-data/_rust_opts_telnet.mdx
@@ -1,3 +1,2 @@
```rust
-{{#include docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}}
```
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md
index ab1bff4449dce4f8e00c60bc4c77656c49676add..eb8afd0505c0ff88750a538f4f889a29aa979e36 100644
--- a/docs/zh/10-deployment/03-k8s.md
+++ b/docs/zh/10-deployment/03-k8s.md
@@ -3,11 +3,20 @@ sidebar_label: Kubernetes
title: 在 Kubernetes 上部署 TDengine 集群
---
-以下配置文件可以从 [GitHub 仓库](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine) 下载。
+作为面向云原生架构设计的时序数据库,TDengine 支持 Kubernetes 部署。这里介绍如何使用 YAML 文件一步一步从头创建一个 TDengine 集群,并重点介绍 Kubernetes 环境下 TDengine 的常用操作。
+
+## 前置条件
+
+要使用 Kubernetes 部署管理 TDengine 集群,需要做好如下准备工作。
+
+* 本文和下一章使用 minikube、kubectl 和 helm 等工具进行安装部署,请提前安装好相应软件
+* Kubernetes 已经安装部署并能正常访问使用或更新必要的容器仓库或其他服务
+
+以下配置文件也可以从 [GitHub 仓库](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine) 下载。
## 配置 Service 服务
-创建一个 Service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
+创建一个 Service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的端口:
```yaml
---
@@ -31,7 +40,8 @@ spec:
## 有状态服务 StatefulSet
-根据 Kubernetes 对各类部署的说明,我们将使用 StatefulSet 作为 TDengine 的服务类型,创建文件 `tdengine.yaml`:
+根据 Kubernetes 对各类部署的说明,我们将使用 StatefulSet 作为 TDengine 的服务类型。
+创建文件 `tdengine.yaml`,其中 replicas 定义集群节点的数量为 3。节点时区为中国(Asia/Shanghai),每个节点分配 10G 标准(standard)存储。你也可以根据实际情况进行相应修改。
```yaml
---
@@ -43,7 +53,7 @@ metadata:
app: "tdengine"
spec:
serviceName: "taosd"
- replicas: 2
+ replicas: 3
updateStrategy:
type: RollingUpdate
selector:
@@ -58,10 +68,7 @@ spec:
containers:
- name: "tdengine"
image: "tdengine/tdengine:3.0.0.0"
- imagePullPolicy: "Always"
- envFrom:
- - configMapRef:
- name: taoscfg
+ imagePullPolicy: "IfNotPresent"
ports:
- name: tcp6030
protocol: "TCP"
@@ -130,10 +137,9 @@ spec:
```bash
kubectl apply -f taosd-service.yaml
kubectl apply -f tdengine.yaml
-
```
-上面的配置将生成一个三节点的 TDengine 集群,dnode 是自动配置的,可以使用 show dnodes 命令查看当前集群的节点:
+上面的配置将生成一个三节点的 TDengine 集群,dnode 为自动配置,可以使用 show dnodes 命令查看当前集群的节点:
```bash
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
diff --git a/docs/zh/14-reference/03-connector/rust.mdx b/docs/zh/14-reference/03-connector/rust.mdx
index 25a8409b6e6faca651d1eaf3e02fbd4a0199c557..ae644e191166e244ae42373aeef2cbbacbe9e0e1 100644
--- a/docs/zh/14-reference/03-connector/rust.mdx
+++ b/docs/zh/14-reference/03-connector/rust.mdx
@@ -10,222 +10,268 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparition.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
-import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
-import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
-import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
+import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
-[](https://crates.io/crates/libtaos)  [](https://docs.rs/libtaos)
+[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
-`libtaos` 是 TDengine 的官方 Rust 语言连接器。Rust 开发人员可以通过它开发存取 TDengine 数据库的应用软件。
+`taos` 是 TDengine 的官方 Rust 语言连接器。Rust 开发人员可以通过它开发存取 TDengine 数据库的应用软件。
-`libtaos` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。另外一种是 **REST 连接**,它通过 taosAdapter 的 REST 接口连接 TDengine 运行实例。你可以通过不同的 “特性(即 Cargo 关键字 features)” 来指定使用哪种连接器。REST 连接支持任何平台,但原生连接支持所有 TDengine 客户端能运行的平台。
+`taos` 提供两种建立连接的方式。一种是**原生连接**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例。另外一种是 **Websocket 连接**,它通过 taosAdapter 的 Websocket 接口连接 TDengine 运行实例。你可以通过不同的 “特性(即 Cargo 关键字 `features`)” 来指定使用哪种连接器(默认同时支持)。Websocket 连接支持任何平台,原生连接支持所有 TDengine 客户端能运行的平台。
-`libtaos` 的源码托管在 [GitHub](https://github.com/taosdata/libtaos-rs)。
+该 Rust 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-connector-rust)。
## 支持的平台
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
-REST 连接支持所有能运行 Rust 的平台。
+Websocket 连接支持所有能运行 Rust 的平台。
## 版本支持
请参考[版本支持列表](/reference/connector#版本支持)
-Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 2.4 版本以上的 TDengine,以避免已知问题。
+Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
## 安装
### 安装前准备
+
* 安装 Rust 开发工具链
* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
-### 添加 libtaos 依赖
+### 添加 taos 依赖
-根据选择的连接方式,按照如下说明在 [Rust](https://rust-lang.org) 项目中添加 [libtaos][libtaos] 依赖:
+根据选择的连接方式,按照如下说明在 [Rust](https://rust-lang.org) 项目中添加 [taos][taos] 依赖:
-
-
+
+
-在 `Cargo.toml` 文件中添加 [libtaos][libtaos]:
+在 `Cargo.toml` 文件中添加 [taos][taos]:
```toml
[dependencies]
# use default feature
-libtaos = "*"
+taos = "*"
```
-
-在 `Cargo.toml` 文件中添加 [libtaos][libtaos],并启用 `rest` 特性。
+
+
+在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `native` 特性:
```toml
[dependencies]
-# use rest feature
-libtaos = { version = "*", features = ["rest"]}
+taos = { version = "*", default-features = false, features = ["native"] }
```
-
-
-
-### 使用连接池
+
-请在 `Cargo.toml` 中启用 `r2d2` 特性。
+在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `ws` 特性。
```toml
[dependencies]
-# with taosc
-libtaos = { version = "*", features = ["r2d2"] }
-# or rest
-libtaos = { version = "*", features = ["rest", "r2d2"] }
+taos = { version = "*", default-features = false, features = ["ws"] }
```
+
+
+
## 建立连接
-[TaosCfgBuilder] 为使用者提供构造器形式的 API,以便于后续创建连接或使用连接池。
+[TaosBuilder] 通过 DSN 连接描述字符串创建一个连接构造器。
```rust
-let cfg: TaosCfg = TaosCfgBuilder::default()
- .ip("127.0.0.1")
- .user("root")
- .pass("taosdata")
- .db("log") // do not set if not require a default database.
- .port(6030u16)
- .build()
- .expect("TaosCfg builder error");
-}
+let builder = TaosBuilder::from_dsn("taos://")?;
```
现在您可以使用该对象创建连接:
```rust
-let conn = cfg.connect()?;
+let conn = builder.build()?;
```
连接对象可以创建多个:
```rust
-let conn = cfg.connect()?;
-let conn2 = cfg.connect()?;
+let conn1 = builder.build()?;
+let conn2 = builder.build()?;
+```
+
+DSN 描述字符串基本结构如下:
+
+```text
+[+]://[[:@]:][/][?=[&=]]
+|------|------------|---|-----------|-----------|------|------|------------|-----------------------|
+|driver| protocol | | username | password | host | port | database | params |
+```
+
+各部分意义见下表:
+
+- **driver**: 必须指定驱动名以便连接器选择何种方式创建连接,支持如下驱动名:
+ - **taos**: 表明使用 TDengine 连接器驱动。
+ - **tmq**: 使用 TMQ 订阅数据。
+ - **http/ws**: 使用 Websocket 创建连接。
+ - **https/wss**: 在 Websocket 连接方式下显式启用 SSL/TLS 连接。
+- **protocol**: 显式指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
+- **username/password**: 用于创建连接的用户名及密码。
+- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,Websocket 连接默认为 `localhost:6041` 。
+- **database**: 指定默认连接的数据库名。
+- **params**:其他可选参数。
+
+一个完整的 DSN 描述字符串示例如下:
+
+```text
+taos+ws://localhost:6041/test
```
-可以在应用中使用连接池:
+表示使用 Websocket(`ws`)方式通过 `6041` 端口连接服务器 `localhost`,并指定默认数据库为 `test`。
+
+这使得用户可以通过 DSN 指定连接方式:
```rust
-let pool = r2d2::Pool::builder()
- .max_size(10000) // max connections
- .build(cfg)?;
+use taos::*;
-// ...
-// Use pool to get connection
-let conn = pool.get()?;
+// use native protocol.
+let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
+let conn1 = builder.build();
+
+// use websocket protocol.
+let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
```
-之后您可以对数据库进行相关操作:
+建立连接后,您可以进行相关数据库操作:
```rust
-async fn demo() -> Result<(), Error> {
- // get connection ...
-
- // create database
- conn.exec("create database if not exists demo").await?;
- // change database context
- conn.exec("use demo").await?;
- // create table
- conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?;
- // insert
- conn.exec("insert into tb1 values(now, 1)").await?;
- // query
- let rows = conn.query("select * from tb1").await?;
- for row in rows.rows {
- println!("{}", row.into_iter().join(","));
+async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
+ // prepare database
+ taos.exec_many([
+ format!("DROP DATABASE IF EXISTS `{db}`"),
+ format!("CREATE DATABASE `{db}`"),
+ format!("USE `{db}`"),
+ ])
+ .await?;
+
+ let inserted = taos.exec_many([
+ // create super table
+ "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
+ TAGS (`groupid` INT, `location` BINARY(16))",
+ // create child table
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ // insert into child table
+ "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
+ // insert with NULL values
+ "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
+ // insert and automatically create table with tags if not exists
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ // insert many records in a single sql
+ "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
+ ]).await?;
+
+ assert_eq!(inserted, 6);
+ let mut result = taos.query("select * from `meters`").await?;
+
+ for field in result.fields() {
+ println!("got field: {}", field.name());
}
+
+ let values = result.
}
```
-## 使用示例
+查询数据可以通过两种方式:使用内建类型或 [serde](https://serde.rs) 序列化框架。
-### 写入数据
+```rust
+ // Query option 1, use rows stream.
+ let mut rows = result.rows();
+ while let Some(row) = rows.try_next().await? {
+ for (name, value) in row {
+ println!("got value of {}: {}", name, value);
+ }
+ }
-#### SQL 写入
+ // Query options 2, use deserialization with serde.
+ #[derive(Debug, serde::Deserialize)]
+ #[allow(dead_code)]
+ struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime,
+ // float to f32
+ current: Option,
+ // int to i32
+ voltage: Option,
+ phase: Option,
+ groupid: i32,
+ // binary/varchar to String
+ location: String,
+ }
-
+ let records: Vec = taos
+ .query("select * from `meters`")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
+
+ dbg!(records);
+ Ok(())
+```
-#### InfluxDB 行协议写入
+## 使用示例
-
+### 写入数据
-#### OpenTSDB Telnet 行协议写入
+#### SQL 写入
-
+
-#### OpenTSDB JSON 行协议写入
+#### STMT 写入
-
+
### 查询数据
-### 更多示例程序
-
-| 程序路径 | 程序说明 |
-| -------------- | ----------------------------------------------------------------------------- |
-| [demo.rs] | 基本API 使用示例 |
-| [bailongma-rs] | 使用 TDengine 作为存储后端的 Prometheus 远程存储 API 适配器,使用 r2d2 连接池 |
-
## API 参考
-### 连接构造器 API
-
-[Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) 构造器模式是 Rust 处理复杂数据类型或可选配置类型的解决方案。[libtaos] 实现中,使用连接构造器 [TaosCfgBuilder] 作为 TDengine Rust 连接器的入口。[TaosCfgBuilder] 提供对服务器、端口、数据库、用户名和密码等的可选配置。
+### 连接构造器
-使用 `default()` 方法可以构建一个默认参数的 [TaosCfg],用于后续连接数据库或建立连接池。
+通过 DSN 来构建一个连接器构造器。
```rust
-let cfg = TaosCfgBuilder::default().build()?;
+let cfg = TaosBuilder::default().build()?;
```
-使用构造器模式,用户可按需设置:
+使用 `builder` 对象创建多个连接:
```rust
-let cfg = TaosCfgBuilder::default()
- .ip("127.0.0.1")
- .user("root")
- .pass("taosdata")
- .db("log")
- .port(6030u16)
- .build()?;
-```
-
-使用 [TaosCfg] 对象创建 TDengine 连接:
-
-```rust
-let conn: Taos = cfg.connect();
+let conn: Taos = cfg.build();
```
### 连接池
-在复杂应用中,建议启用连接池。[libtaos] 的连接池使用 [r2d2] 实现。
+在复杂应用中,建议启用连接池。[taos] 的连接池使用 [r2d2] 实现。
如下,可以生成一个默认参数的连接池。
```rust
-let pool = r2d2::Pool::new(cfg)?;
+let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
```
同样可以使用连接池的构造器,对连接池参数进行设置:
```rust
- use std::time::Duration;
- let pool = r2d2::Pool::builder()
- .max_size(5000) // max connections
- .max_lifetime(Some(Duration::from_minutes(100))) // lifetime of each connection
- .min_idle(Some(1000)) // minimal idle connections
- .connection_timeout(Duration::from_minutes(2))
- .build(cfg);
+let dsn = "taos://localhost:6030";
+
+let opts = PoolBuilder::new()
+ .max_size(5000) // max connections
+ .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
+ .min_idle(Some(1000)) // minimal idle connections
+ .connection_timeout(Duration::from_secs(2));
+
+let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
```
在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。
@@ -236,44 +282,85 @@ let taos = pool.get()?;
### 连接
-[Taos] 结构体是 [libtaos] 中的连接管理者,主要提供了两个 API:
+[Taos][struct.Taos] 对象提供了多个数据库操作的 API:
1. `exec`: 执行某个非查询类 SQL 语句,例如 `CREATE`,`ALTER`,`INSERT` 等。
```rust
- taos.exec().await?;
+ let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?;
+ ```
+
+2. `exec_many`: 同时(顺序)执行多个 SQL 语句。
+
+ ```rust
+ taos.exec_many([
+ "CREATE DATABASE test",
+ "USE test",
+ "CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)",
+ ]).await?;
```
-2. `query`:执行查询语句,返回 [TaosQueryData] 对象。
+3. `query`:执行查询语句,返回 [ResultSet] 对象。
```rust
- let q = taos.query("select * from log.logs").await?;
+ let mut q = taos.query("select * from log.logs").await?;
```
- [TaosQueryData] 对象存储了查询结果数据和返回的列的基本信息(列名,类型,长度):
+ [ResultSet] 对象存储了查询结果数据和返回的列的基本信息(列名,类型,长度):
- 列信息使用 [ColumnMeta] 存储:
+ 列信息使用 [.fields()] 方法获取:
```rust
- let cols = &q.column_meta;
+ let cols = q.fields();
for col in cols {
- println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes);
+ println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes());
}
```
逐行获取数据:
```rust
- for (i, row) in q.rows.iter().enumerate() {
- for (j, cell) in row.iter().enumerate() {
- println!("cell({}, {}) data: {}", i, j, cell);
+ let mut rows = result.rows();
+ let mut nrows = 0;
+ while let Some(row) = rows.try_next().await? {
+ for (col, (name, value)) in row.enumerate() {
+ println!(
+ "[{}] got value in col {} (named `{:>8}`): {}",
+ nrows, col, name, value
+ );
}
+ nrows += 1;
+ }
+ ```
+
+ 或使用 [serde](https://serde.rs) 序列化框架。
+
+ ```rust
+ #[derive(Debug, Deserialize)]
+ struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime,
+ // float to f32
+ current: Option,
+ // int to i32
+ voltage: Option,
+ phase: Option,
+ groupid: i32,
+ // binary/varchar to String
+ location: String,
}
+
+ let records: Vec = taos
+ .query("select * from `meters`")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
```
需要注意的是,需要使用 Rust 异步函数和异步运行时。
-[Taos] 提供部分 SQL 的 Rust 方法化以减少 `format!` 代码块的频率:
+[Taos][struct.Taos] 提供部分 SQL 的 Rust 方法化以减少 `format!` 代码块的频率:
- `.describe(table: &str)`: 执行 `DESCRIBE` 并返回一个 Rust 数据结构。
- `.create_database(database: &str)`: 执行 `CREATE DATABASE` 语句。
@@ -283,42 +370,61 @@ let taos = pool.get()?;
### 参数绑定接口
-与 C 接口类似,Rust 提供参数绑定接口。首先,通过 [Taos] 对象创建一个 SQL 语句的参数绑定对象 [Stmt]:
+与 C 接口类似,Rust 提供参数绑定接口。首先,通过 [Taos][struct.Taos] 对象创建一个 SQL 语句的参数绑定对象 [Stmt]:
```rust
-let mut stmt: Stmt = taos.stmt("insert into ? values(?,?)")?;
+let mut stmt = Stmt::init(&taos).await?;
+stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
```
参数绑定对象提供了一组接口用于实现参数绑定:
-##### `.set_tbname(tbname: impl ToCString)`
+#### `.set_tbname(name)`
用于绑定表名。
-##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
+```rust
+let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
+stmt.set_tbname("d0")?;
+```
+
+#### `.set_tags(&[tag])`
当 SQL 语句使用超级表时,用于绑定子表表名和标签值:
```rust
-let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?,?)")?;
-// tags can be created with any supported type, here is an example using JSON
-let v = Field::Json(serde_json::from_str("{\"tag1\":\"一二三四五六七八九十\"}").unwrap());
-stmt.set_tbname_tags("tb0", [&tag])?;
+let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)")?;
+stmt.set_tbname("d0")?;
+stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
-##### `.bind(params: impl IntoParams)`
+#### `.bind(&[column])`
-用于绑定值类型。使用 [Field] 结构体构建需要的类型并绑定:
+用于绑定值类型。使用 [ColumnView] 结构体构建需要的类型并绑定:
```rust
-let ts = Field::Timestamp(Timestamp::now());
-let value = Field::Float(0.0);
-stmt.bind(vec![ts, value].iter())?;
+let params = vec![
+ ColumnView::from_millis_timestamp(vec![164000000000]),
+ ColumnView::from_bools(vec![true]),
+ ColumnView::from_tiny_ints(vec![i8::MAX]),
+ ColumnView::from_small_ints(vec![i16::MAX]),
+ ColumnView::from_ints(vec![i32::MAX]),
+ ColumnView::from_big_ints(vec![i64::MAX]),
+ ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
+ ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
+ ColumnView::from_unsigned_ints(vec![u32::MAX]),
+ ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
+ ColumnView::from_floats(vec![f32::MAX]),
+ ColumnView::from_doubles(vec![f64::MAX]),
+ ColumnView::from_varchar(vec!["ABC"]),
+ ColumnView::from_nchar(vec!["涛思数据"]),
+];
+let rows = stmt.bind(¶ms)?.add_batch()?.execute()?;
```
-##### `.execute()`
+#### `.execute()`
-执行 SQL。[Stmt] 对象可以复用,在执行后可以重新绑定并执行。
+执行 SQL。[Stmt] 对象可以复用,在执行后可以重新绑定并执行。执行前请确保所有数据已通过 `.add_batch` 加入到执行队列中。
```rust
stmt.execute()?;
@@ -329,60 +435,84 @@ stmt.execute()?;
//stmt.execute()?;
```
-### 行协议接口
+一个可运行的示例请见 [GitHub 上的示例](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs)。
+
+### 订阅
+
+TDengine 通过消息队列 [TMQ](../../../taos-sql/tmq/) 启动一个订阅。
-行协议接口支持多种模式和不同精度,需要引入 schemaless 模块中的常量以进行设置:
+从 DSN 开始,构建一个 TMQ 连接器。
```rust
-use libtaos::*;
-use libtaos::schemaless::*;
+let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
-- InfluxDB 行协议
+创建消费者:
- ```rust
- let lines = [
- "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000"
- "st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000"
- ];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?;
- ```
+```rust
+let mut consumer = tmq.build()?;
+```
-- OpenTSDB Telnet 协议
+消费者可订阅一个或多个 `TOPIC`。
- ```rust
- let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
- ```
+```rust
+consumer.subscribe(["tmq_meters"]).await?;
+```
-- OpenTSDB JSON 协议
+TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) 类型,可以使用相应 API 对每个消息进行消费,并通过 `.commit` 进行已消费标记。
- ```rust
- let lines = [r#"
- {
- "metric": "st",
- "timestamp": 1626006833,
- "value": 10,
- "tags": {
- "t1": true,
- "t2": false,
- "t3": 10,
- "t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+```rust
+{
+ let mut stream = consumer.stream();
+
+ while let Some((offset, message)) = stream.try_next().await? {
+ // get information from offset
+
+ // the topic
+ let topic = offset.topic();
+ // the vgroup id, like partition id in kafka.
+ let vgroup_id = offset.vgroup_id();
+ println!("* in vgroup id {vgroup_id} of topic {topic}\n");
+
+ if let Some(data) = message.into_data() {
+ while let Some(block) = data.fetch_raw_block().await? {
+ // one block for one table, get table name if needed
+ let name = block.table_name();
+ let records: Vec = block.deserialize().try_collect()?;
+ println!(
+ "** table: {}, got {} records: {:#?}\n",
+ name.unwrap(),
+ records.len(),
+ records
+ );
}
- }"#];
- taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
- ```
+ }
+ consumer.commit(offset).await?;
+ }
+}
+```
+
+停止订阅:
+
+```rust
+consumer.unsubscribe().await;
+```
+
+对于 TMQ DSN, 有以下配置项可以进行设置,需要注意的是,`group.id` 是必须的。
+
+- `group.id`: 同一个消费者组,将以至少消费一次的方式进行消息负载均衡。
+- `client.id`: 可选的订阅客户端识别项。
+- `auto.offset.reset`: 可选初始化订阅起点, *earliest* 为从头开始订阅, *latest* 为仅从最新数据开始订阅,默认为从头订阅。注意,此选项在同一个 `group.id` 中仅生效一次。
+- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
+- `auto.commit.interval.ms`: 自动标记的时间间隔。
+
+完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
-其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。
+其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。
-[libtaos]: https://github.com/taosdata/libtaos-rs
-[tdengine]: https://github.com/taosdata/TDengine
-[bailongma-rs]: https://github.com/taosdata/bailongma-rs
+[taos]: https://github.com/taosdata/taos-connector-rust
[r2d2]: https://crates.io/crates/r2d2
-[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
-[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
-[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
-[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
-[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
-[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
-[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
+[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
+[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
+[struct.Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html
+[Stmt]: https://docs.rs/taos/latest/taos/struct.Stmt.html
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index f2712f2814593bddd65401cb129c8c58ee55a316..ae4280e26a64e2d10534a0faaf70ca0704cf58a6 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -3,8 +3,7 @@ title: Schemaless 写入
description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构'
---
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine
-从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@@ -41,10 +40,10 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
| -------- | -------- | ------------ | -------------- |
| 1 | 无或 f64 | double | 8 |
| 2 | f32 | float | 4 |
-| 3 | i8 | TinyInt | 1 |
-| 4 | i16 | SmallInt | 2 |
-| 5 | i32 | Int | 4 |
-| 6 | i64 或 i | Bigint | 8 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
@@ -69,20 +68,21 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
-排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t\*” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
+排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
+为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。
-2. 如果解析行协议获得的超级表不存在,则会创建这个超级表。
+2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
4. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。
5. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为
NULL。
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
-7. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。
-8. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
+7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
+8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
:::
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 5b39bc854e16f1870d105c291506a9259fc96224..410fa02ded3c16bd0e1fd2c669b5c8c46a7e1801 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -249,6 +249,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
+
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index db87bde5213a2837b67f0e34fdcad4499a70cb28..38c50550591b47ae6e5fc5075340c6c8d5af7c23 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -269,6 +269,7 @@ typedef struct SSelectStmt {
bool hasInterpFunc;
bool hasLastRowFunc;
bool hasTimeLineFunc;
+ bool hasUdaf;
bool onlyHasKeepOrderFunc;
bool groupSort;
} SSelectStmt;
diff --git a/packaging/deb/DEBIAN/control b/packaging/deb/DEBIAN/control
index fd3f81ba082d11f6ff3979382a63597b5806fa1f..a2005be138ea5d41d5e96d17df53d1b73b59b533 100644
--- a/packaging/deb/DEBIAN/control
+++ b/packaging/deb/DEBIAN/control
@@ -1,5 +1,5 @@
Package: tdengine
-Version: 1.0.0
+Version: 3.0.0
Section: utils
Priority: optional
#Essential: no
diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst
index 184653991c719e24dbbb19f8441cc2103443b622..8a1a7d4d8160c0680d2492699e3d8c4a5709b096 100644
--- a/packaging/deb/DEBIAN/preinst
+++ b/packaging/deb/DEBIAN/preinst
@@ -1,5 +1,10 @@
#!/bin/bash
+if [ -f /var/lib/taos/dnode/dnodeCfg.json ]; then
+ echo -e "The default data directory \033[41;37m/var/lib/taos\033[0m contains old data of tdengine 2.x, please clear it before installing!"
+ exit 1
+fi
+
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 14c814eb839cd49fac566fde10a39f7b5d800962..5676bf5c4324a340aab09c18c59636d4fc80d43c 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -1,5 +1,9 @@
#!/bin/bash
+if [ $1 -eq "abort-upgrade" ]; then
+ exit 0
+fi
+
insmetaPath="/usr/local/taos/script"
csudo=""
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index d3d4bab0e697f10a4f1824931206cf53fd11d07b..7a34f7a222b13b5e91a88297428df3f425960309 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -132,6 +132,10 @@ fi
#Scripts executed before installation
%pre
+if [ -f /var/lib/taos/dnode/dnodeCfg.json ]; then
+ echo -e "The default data directory \033[41;37m/var/lib/taos\033[0m contains old data of tdengine 2.x, please clear it before installing!"
+ exit 1
+fi
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo "
diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index 2b5d440a732214a5dafdbb1587844f755b5f3db1..1c11ee708569f44dbaa797bf58db20b62849e0fc 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -5,6 +5,10 @@ if (DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
endif()
+IF (TD_GRANT)
+ ADD_DEFINITIONS(-D_GRANT)
+ENDIF ()
+
target_include_directories(
common
PUBLIC "${TD_SOURCE_DIR}/include/common"
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 16681fb70562a74c569a66cb91886e603eaeddb6..6dddcc2f7422aaa09d4e8b7691cce0c2fc107b6d 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -346,7 +346,7 @@ static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)},
{TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)},
- {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
+ // {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
{TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index dba30bb87612f60c02696672f71efc0737aa2d59..84896277211ee06f941793beadeed89d60d0f10f 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1713,7 +1713,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
- SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
+ SSDataBlock* pDataBlock = taosArrayGetP(dataBlocks, i);
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
@@ -1870,10 +1870,10 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
* @brief TODO: Assume that the final generated result it less than 3M
*
* @param pReq
- * @param pDataBlock
+ * @param pDataBlocks
* @param vgId
* @param suid
- *
+ *
*/
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid) {
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index d13daffa0879853db127dde916d672d357ee9ed4..7c6807ab87220b8cbaecddfd9b0278c0b13aa0fe 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -337,6 +337,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_CONTINUE, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -347,7 +348,6 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 1a226abe5c8487b463a1b39fcaa4e5f4f45ff30a..0a42f06081fbc75a114badedf0886786f934985d 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -54,7 +54,7 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (IsReq(pMsg)) {
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("msg:%p, failed to process since %s", pMsg, terrstr());
+ dGError("msg:%p, failed to process since %s", pMsg, terrstr(code));
}
vmSendRsp(pMsg, code);
}
@@ -72,7 +72,7 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, terrstr());
+ dGError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, terrstr(code));
vmSendRsp(pMsg, code);
}
@@ -89,7 +89,7 @@ static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to process stream since %s", pVnode->vgId, pMsg, terrstr());
+ dGError("vgId:%d, msg:%p failed to process stream since %s", pVnode->vgId, pMsg, terrstr(code));
vmSendRsp(pMsg, code);
}
@@ -110,7 +110,7 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
+ dGError("vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr(code));
vmSendRsp(pMsg, code);
}
@@ -156,7 +156,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
if ((pMsg->msgType == TDMT_SCH_QUERY) && (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS)) {
terrno = TSDB_CODE_GRANT_EXPIRED;
code = terrno;
- dDebug("vgId:%d, msg:%p put into vnode-query queue failed since %s", pVnode->vgId, pMsg, terrstr());
+ dDebug("vgId:%d, msg:%p put into vnode-query queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
} else {
vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
dGTrace("vgId:%d, msg:%p put into vnode-query queue", pVnode->vgId, pMsg);
@@ -179,11 +179,11 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
if (!osDataSpaceAvailable()) {
terrno = TSDB_CODE_VND_NO_DISKSPACE;
code = terrno;
- dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr());
+ dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
} else if ((pMsg->msgType == TDMT_VND_SUBMIT) && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
terrno = TSDB_CODE_VND_NO_WRITE_AUTH;
code = terrno;
- dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr());
+ dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
} else {
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
taosWriteQitem(pVnode->pWriteQ, pMsg);
diff --git a/source/dnode/mnode/impl/CMakeLists.txt b/source/dnode/mnode/impl/CMakeLists.txt
index c740ea1397e7b406f0b832d3d63aa88b9accd4e0..25a4397b7defabb855397862127ee4ecaead0ccd 100644
--- a/source/dnode/mnode/impl/CMakeLists.txt
+++ b/source/dnode/mnode/impl/CMakeLists.txt
@@ -15,6 +15,7 @@ target_include_directories(
target_link_libraries(
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser
)
+
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(mnode grant)
ADD_DEFINITIONS(-D_GRANT)
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 9dde083f5054c6557f7dd85049133f44c46522f6..853ace79fd79bd2c30684446d0c12f5640eb881c 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -1419,7 +1419,7 @@ const char *mndGetDbStr(const char *src) {
return pos;
}
-int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
+static int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
int64_t v = 0;
switch (unit) {
case 's':
@@ -1444,7 +1444,7 @@ int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
return v;
}
-char *buildRetension(SArray *pRetension) {
+static char *buildRetension(SArray *pRetension) {
size_t size = taosArrayGetSize(pRetension);
if (size == 0) {
return NULL;
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index cb9f3d980933b5d564d3314474ca02d7d1cdb391..b218d982e9e37f0315bde21ff4c29fdee9154cc3 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -86,6 +86,11 @@ target_link_libraries(
PUBLIC stream
PUBLIC index
)
+
+IF (TD_GRANT)
+ TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
+ENDIF ()
+
target_compile_definitions(vnode PUBLIC -DMETA_REFACT)
if(${BUILD_WITH_INVERTEDINDEX})
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 7427f79509d863bf9b16c7d9cbc10b4f5f8fecc6..3e7fd9df2b299a66a49b98a16aff1e661113532c 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -209,8 +209,8 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
return 0;
_err:
- metaError("vgId:%d, failed to create stb:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
- pReq->suid, tstrerror(terrno));
+ metaError("vgId:%d, failed to create stb:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name, pReq->suid,
+ tstrerror(terrno));
return -1;
}
@@ -304,7 +304,8 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
if (ret < 0) {
- ASSERT(0);
+ terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
+ // ASSERT(0);
return -1;
}
@@ -1196,6 +1197,9 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
goto _exit;
}
+ metaDebug("vgId:%d, set schema:(%" PRId64 ") sver:%d since %s", TD_VID(pMeta->pVnode), pME->uid, pSW->version,
+ tstrerror(terrno));
+
_exit:
taosMemoryFree(pVal);
tEncoderClear(&coder);
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 6b513f0242d4ede6fcfb66b0ee89b82a96447873..b7a2efd4897ffca43e2a7e8b25c3ca3f897c5f03 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -293,7 +293,9 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
if (pItem->maxDelay > TSDB_MAX_ROLLUP_MAX_DELAY) {
pItem->maxDelay = TSDB_MAX_ROLLUP_MAX_DELAY;
}
+
pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
+ taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
", finally maxdelay:%" PRIi32,
TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
@@ -613,34 +615,38 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
- if (code < 0) {
- smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
- pItem->level, terrstr(code));
- goto _err;
+ if (code < 0) {
+ if (code == TSDB_CODE_QRY_IN_EXEC) {
+ break;
+ } else {
+ smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
+ pItem->level, terrstr(code));
+ goto _err;
+ }
}
if (taosArrayGetSize(pResList) == 0) {
if (terrno == 0) {
- smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
+ // smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
break;
+ } else {
+ smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
}
- for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) {
- SSDataBlock *output = taosArrayGetP(pResList, i);
-
#if 1
- char flag[10] = {0};
- snprintf(flag, 10, "level %" PRIi8, pItem->level);
-// blockDebugShowDataBlocks(output, flag);
-// taosArrayDestroy(pResult);
+ char flag[10] = {0};
+ snprintf(flag, 10, "level %" PRIi8, pItem->level);
+ blockDebugShowDataBlocks(pResList, flag);
#endif
- STsdb * sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
- SSubmitReq *pReq = NULL;
+ for (int32_t i = 0; i < taosArrayGetSize(pResList); ++i) {
+ SSDataBlock *output = taosArrayGetP(pResList, i);
+ STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
+ SSubmitReq *pReq = NULL;
// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
@@ -655,11 +661,11 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
SMA_VID(pSma), suid, pItem->level, terrstr());
goto _err;
}
-
+ taosMemoryFreeClear(pReq);
+
smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
SMA_VID(pSma), suid, pItem->level, output->info.version);
- taosMemoryFreeClear(pReq);
}
}
@@ -692,15 +698,12 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
}
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
-
tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid,
STREAM_INPUT__DATA_SUBMIT);
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
if (smaMgmt.tmrHandle) {
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- } else {
- ASSERT(0);
}
return TSDB_CODE_SUCCESS;
@@ -746,7 +749,6 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
return NULL;
}
-
// clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
SRSmaInfo *pCowRSmaInfo = NULL;
// lock
@@ -793,13 +795,7 @@ static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, suid);
if (!pRSmaInfo) {
- smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
- return TSDB_CODE_SUCCESS;
- }
-
- if (!RSMA_INFO_QTASK(pRSmaInfo, 0)) {
- tdReleaseRSmaInfo(pSma, pRSmaInfo);
- smaDebug("vgId:%d, execute rsma, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
+ smaError("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
return TSDB_CODE_SUCCESS;
}
@@ -1331,14 +1327,16 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem);
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
+ pRSmaInfo->refId);
return;
}
SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
if (!pStat) {
- smaDebug("rsma fetch task not start since already destroyed, rsetId rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
- pRSmaInfo->refId);
+ smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%" PRIi64 " refId:%d)",
+ smaMgmt.rsetId, pRSmaInfo->refId);
return;
}
@@ -1350,8 +1348,8 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
- smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64
- " refId:%d",
+ smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
+ ", rsetId rsetId:%" PRIi64 " refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
@@ -1366,30 +1364,31 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma),
- pItem->level, pRSmaInfo->suid);
+ smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
- smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused",
+ smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_INACTIVE: {
- smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is inactive",
+ smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is inactive",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_INIT: {
- smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is init", SMA_VID(pSma),
- pItem->level, pRSmaInfo->suid);
+ smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid::%" PRIi64 " since stat is init",
+ SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
default: {
- smaWarn("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is unknown",
+ smaWarn("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
}
_end:
+ // taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
}
@@ -1402,7 +1401,7 @@ _end:
* @return int32_t
*/
int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
- SRSmaFetchMsg fetchMsg = { .suid = pInfo->suid, .level = level};
+ SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level};
int32_t ret = 0;
int32_t contLen = 0;
SEncoder encoder = {0};
@@ -1431,7 +1430,7 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
.contLen = contLen,
};
- if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) {
+ if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
SMA_VID(pSma), pInfo->suid, level, terrstr());
goto _err;
@@ -1462,7 +1461,7 @@ int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
- return -1;
+ goto _err;
}
pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
@@ -1479,7 +1478,7 @@ int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
terrno = TSDB_CODE_RSMA_EMPTY_INFO;
}
smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
- req.suid, req.level, terrstr());
+ req.suid, req.level, terrstr());
goto _err;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 38334dac0525c027f8f2acaad9ebcd1853d49b43..0831f3d75a64edebdaa9c2b5cc58f4d8e58bfdcc 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#include "tsdb.h"
#include "osDef.h"
+#include "tsdb.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
typedef enum {
@@ -130,8 +130,8 @@ struct STsdbReader {
SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap;
SIOCostSummary cost;
- STSchema* pSchema;// the newest version schema
- STSchema* pMemSchema;// the previous schema for in-memory data, to avoid load schema too many times
+ STSchema* pSchema; // the newest version schema
+ STSchema* pMemSchema; // the previous schema for in-memory data, to avoid load schema too many times
SDataFReader* pFileReader;
SVersionRange verRange;
@@ -1213,17 +1213,17 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
-static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key, SFileBlockDumpInfo* pDumpInfo) {
-
+static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key,
+ SFileBlockDumpInfo* pDumpInfo) {
// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
- int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;
+ int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
- if (nextKey != key) { // merge is not needed
+ if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return true;
@@ -1239,7 +1239,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1);
}
- if (sversion == pReader->pSchema->version) {
+ if (pReader->pSchema && sversion == pReader->pSchema->version) {
return pReader->pSchema;
}
@@ -1265,10 +1265,10 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
SBlockData* pBlockData = &pReader->status.fileBlockData;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- SArray* pDelList = pBlockScanInfo->delSkyline;
- bool freeTSRow = false;
+ TSDBKEY k = TSDBROW_KEY(pRow);
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ SArray* pDelList = pBlockScanInfo->delSkyline;
+ bool freeTSRow = false;
uint64_t uid = pBlockScanInfo->uid;
// ascending order traverse
@@ -2153,7 +2153,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
- TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
+ TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
if (outOfTimeWindow(key.ts, &pReader->window)) {
pIter->hasVal = false;
return NULL;
@@ -2186,7 +2186,6 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
}
-
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader) {
while (1) {
@@ -2318,9 +2317,8 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader, bool* freeTSRow) {
-
TSDBROW* pNextRow = NULL;
- TSDBROW current = *pRow;
+ TSDBROW current = *pRow;
{ // if the timestamp of the next valid row has a different ts, return current row directly
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
@@ -2350,6 +2348,10 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe
// get the correct schema for data in memory
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(¤t), pReader, uid);
+ if (pReader->pSchema == NULL) {
+ pReader->pSchema = pTSchema;
+ }
+
tRowMergerInit2(&merge, pReader->pSchema, ¤t, pTSchema);
STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
@@ -2390,8 +2392,8 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
tRowMergerGetRow(&merge, pTSRow);
}
-int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow,
- int64_t endKey, bool* freeTSRow) {
+int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey,
+ bool* freeTSRow) {
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
@@ -2446,7 +2448,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
+ STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
SColVal colVal = {0};
int32_t i = 0, j = 0;
@@ -2532,7 +2534,7 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
do {
STSRow* pTSRow = NULL;
- bool freeTSRow = false;
+ bool freeTSRow = false;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey, &freeTSRow);
if (pTSRow == NULL) {
break;
@@ -2581,9 +2583,7 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
return metaGetIvtIdx(pMeta);
}
-uint64_t getReaderMaxVersion(STsdbReader *pReader) {
- return pReader->verRange.maxVer;
-}
+uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
/**
* @brief Get all suids since suid
@@ -2761,7 +2761,8 @@ void tsdbReaderClose(STsdbReader* pReader) {
SIOCostSummary* pCost = &pReader->cost;
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
- " SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, "
+ " SMA-time:%.2f ms, fileBlocks:%" PRId64
+ ", fileBlocks-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
@@ -2769,7 +2770,9 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
- taosMemoryFree(pReader->pMemSchema);
+ if (pReader->pMemSchema != pReader->pSchema) {
+ taosMemoryFree(pReader->pMemSchema);
+ }
taosMemoryFreeClear(pReader);
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index ecff58f3b1ac68349ef7909e52deb55ae26b8597..d5c5e186681d129d4e3b94dad53939637d22cdaf 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -293,6 +293,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
+ case TDMT_VND_FETCH_RSMA:
+ return smaProcessFetch(pVnode->pSma, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -329,8 +331,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableCfg(pVnode, pMsg, true);
case TDMT_VND_BATCH_META:
return vnodeGetBatchMeta(pVnode, pMsg);
- case TDMT_VND_FETCH_RSMA:
- return smaProcessFetch(pVnode->pSma, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
@@ -357,7 +357,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
// TODO: remove the function
void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
// TODO
- blockDebugShowDataBlocks(data, __func__);
+ // blockDebugShowDataBlocks(data, __func__);
tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
}
diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h
index a1ba70c7021c1ea263c38a5cc9d450b5f879bd50..a56f8e8c049cca1cf606541ea8938f4f648bb32b 100644
--- a/source/libs/executor/inc/tsimplehash.h
+++ b/source/libs/executor/inc/tsimplehash.h
@@ -45,6 +45,8 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
*/
int32_t tSimpleHashGetSize(const SSHashObj *pHashObj);
+int32_t tSimpleHashPrint(const SSHashObj *pHashObj);
+
/**
* put element into hash table, if the element with the same key exists, update it
* @param pHashObj
@@ -98,6 +100,15 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj);
*/
void *tSimpleHashGetKey(const SSHashObj* pHashObj, void *data, size_t* keyLen);
+/**
+ * Create the hash table iterator
+ * @param pHashObj
+ * @param data
+ * @param iter
+ * @return void*
+ */
+void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 1fc0f5ff51636bfb10fafc26a75ab5bc523688aa..7115ad85a50884d21f496a900d21f0e954c07ea0 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -529,7 +529,6 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
cleanUpUdfs();
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
atomic_store_64(&pTaskInfo->owner, 0);
-
return pTaskInfo->code;
}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 9e485e7684da599cbd4d94169f67df893be1b288..be129cb6b488ca5625796ab65f1b6835c5cbe75d 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -3612,11 +3612,16 @@ void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) {
taosRemoveRef(exchangeObjRefPool, pExInfo->self);
}
+void freeSourceDataInfo(void *p) {
+ SSourceDataInfo* pInfo = (SSourceDataInfo*)p;
+ taosMemoryFreeClear(pInfo->pRsp);
+}
+
void doDestroyExchangeOperatorInfo(void* param) {
SExchangeInfo* pExInfo = (SExchangeInfo*)param;
taosArrayDestroy(pExInfo->pSources);
- taosArrayDestroy(pExInfo->pSourceDataInfo);
+ taosArrayDestroyEx(pExInfo->pSourceDataInfo, freeSourceDataInfo);
if (pExInfo->pResultBlockList != NULL) {
taosArrayDestroyEx(pExInfo->pResultBlockList, freeBlock);
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index b4489a92fbdd3f2cd95809a6ef11541ca88e27dd..6778e97d7ad63e14dfc19091d12767ecbfc140e5 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -2215,6 +2215,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
colDataAppend(pDst, rows, (char *)current.val, false);
}
+ taosMemoryFree(current.val);
pResBlock->info.rows += 1;
break;
}
@@ -2437,10 +2438,10 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
// if its the last point in data block, no need to fill, but reserve this point as the start value and do
// the interpolation when processing next data block.
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
+ doKeepLinearInfo(pSliceInfo, pBlock, i, false);
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
@@ -2478,11 +2479,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
doKeepPrevRows(pSliceInfo, pBlock, i);
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
// no need to increate pSliceInfo->current here
//pSliceInfo->current =
// taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
+ doKeepLinearInfo(pSliceInfo, pBlock, i, false);
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
@@ -2558,10 +2559,10 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
+ doKeepLinearInfo(pSliceInfo, pBlock, i, false);
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
@@ -2618,6 +2619,35 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
return pResBlock->info.rows == 0 ? NULL : pResBlock;
}
+void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) {
+ STimeSliceOperatorInfo* pInfo = (STimeSliceOperatorInfo*)param;
+
+ pInfo->pRes = blockDataDestroy(pInfo->pRes);
+
+ for (int32_t i = 0; i < taosArrayGetSize(pInfo->pPrevRow); ++i) {
+ SGroupKeys* pKey = taosArrayGet(pInfo->pPrevRow, i);
+ taosMemoryFree(pKey->pData);
+ }
+ taosArrayDestroy(pInfo->pPrevRow);
+
+ for (int32_t i = 0; i < taosArrayGetSize(pInfo->pNextRow); ++i) {
+ SGroupKeys* pKey = taosArrayGet(pInfo->pNextRow, i);
+ taosMemoryFree(pKey->pData);
+ }
+ taosArrayDestroy(pInfo->pNextRow);
+
+ for (int32_t i = 0; i < taosArrayGetSize(pInfo->pLinearInfo); ++i) {
+ SFillLinearInfo* pKey = taosArrayGet(pInfo->pLinearInfo, i);
+ taosMemoryFree(pKey->start.val);
+ taosMemoryFree(pKey->end.val);
+ }
+ taosArrayDestroy(pInfo->pLinearInfo);
+
+ taosMemoryFree(pInfo->pFillColInfo);
+ taosMemoryFreeClear(param);
+}
+
+
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo) {
STimeSliceOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(STimeSliceOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@@ -2665,7 +2695,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyBasicOperatorInfo, NULL, NULL, NULL);
+ createOperatorFpSet(operatorDummyOpenFn, doTimeslice, NULL, NULL, destroyTimeSliceOperatorInfo, NULL, NULL, NULL);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index e709643af9e808eb64db914284cae529f3cd32d3..7989ad2b5a44e6ca35074cff15ec865492025328 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -62,7 +62,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
}
SSHashObj *pHashObj = (SSHashObj *)taosMemoryCalloc(1, sizeof(SSHashObj));
- if (pHashObj == NULL) {
+ if (!pHashObj) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
@@ -78,7 +78,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
pHashObj->dataLen = dataLen;
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
- if (pHashObj->hashList == NULL) {
+ if (!pHashObj->hashList) {
taosMemoryFree(pHashObj);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -87,7 +87,7 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
}
int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
- if (pHashObj == NULL) {
+ if (!pHashObj) {
return 0;
}
return (int32_t)atomic_load_64((int64_t *)&pHashObj->size);
@@ -95,7 +95,7 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dsize);
- if (pNewNode == NULL) {
+ if (!pNewNode) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
@@ -120,7 +120,7 @@ static void taosHashTableResize(SSHashObj *pHashObj) {
int64_t st = taosGetTimestampUs();
void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, sizeof(void *) * newCapacity);
- if (pNewEntryList == NULL) {
+ if (!pNewEntryList) {
// qWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
return;
}
@@ -133,22 +133,21 @@ static void taosHashTableResize(SSHashObj *pHashObj) {
for (int32_t idx = 0; idx < pHashObj->capacity; ++idx) {
SHNode *pNode = pHashObj->hashList[idx];
- if (pNode == NULL) {
+ if (!pNode) {
continue;
}
- SHNode *pNext;
+ SHNode *pNext = NULL;
SHNode *pPrev = NULL;
-
while (pNode != NULL) {
void *key = GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen);
- uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->dataLen);
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity);
pNext = pNode->next;
if (newIdx != idx) {
- if (pPrev == NULL) {
+ if (!pPrev) {
pHashObj->hashList[idx] = pNext;
} else {
pPrev->next = pNext;
@@ -172,7 +171,7 @@ static void taosHashTableResize(SSHashObj *pHashObj) {
}
int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
- if (pHashObj == NULL || key == NULL) {
+ if (!pHashObj || !key) {
return -1;
}
@@ -186,13 +185,14 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHNode *pNode = pHashObj->hashList[slot];
- if (pNode == NULL) {
- SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->size, hashVal);
- if (pNewNode == NULL) {
+ if (!pNode) {
+ SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal);
+ if (!pNewNode) {
return -1;
}
pHashObj->hashList[slot] = pNewNode;
+ atomic_add_fetch_64(&pHashObj->size, 1);
return 0;
}
@@ -203,9 +203,9 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
pNode = pNode->next;
}
- if (pNode == NULL) {
- SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->size, hashVal);
- if (pNewNode == NULL) {
+ if (!pNode) {
+ SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal);
+ if (!pNewNode) {
return -1;
}
pNewNode->next = pHashObj->hashList[slot];
@@ -234,7 +234,7 @@ static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void
static FORCE_INLINE bool taosHashTableEmpty(const SSHashObj *pHashObj) { return tSimpleHashGetSize(pHashObj) == 0; }
void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
- if (pHashObj == NULL || taosHashTableEmpty(pHashObj) || key == NULL) {
+ if (!pHashObj || taosHashTableEmpty(pHashObj) || !key) {
return NULL;
}
@@ -242,7 +242,7 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHNode *pNode = pHashObj->hashList[slot];
- if (pNode == NULL) {
+ if (!pNode) {
return NULL;
}
@@ -256,19 +256,43 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
}
int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key) {
- // todo
+ if (!pHashObj || !key) {
+ return TSDB_CODE_FAILED;
+ }
+
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
+
+ int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+
+ SHNode *pNode = pHashObj->hashList[slot];
+ SHNode *pPrev = NULL;
+ while (pNode) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) {
+ if (!pPrev) {
+ pHashObj->hashList[slot] = pNode->next;
+ } else {
+ pPrev->next = pNode->next;
+ }
+ FREE_HASH_NODE(pNode);
+ atomic_sub_fetch_64(&pHashObj->size, 1);
+ break;
+ }
+ pPrev = pNode;
+ pNode = pNode->next;
+ }
+
return TSDB_CODE_SUCCESS;
}
void tSimpleHashClear(SSHashObj *pHashObj) {
- if (pHashObj == NULL) {
+ if (!pHashObj || taosHashTableEmpty(pHashObj)) {
return;
}
- SHNode *pNode, *pNext;
+ SHNode *pNode = NULL, *pNext = NULL;
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
pNode = pHashObj->hashList[i];
- if (pNode == NULL) {
+ if (!pNode) {
continue;
}
@@ -278,11 +302,11 @@ void tSimpleHashClear(SSHashObj *pHashObj) {
pNode = pNext;
}
}
- pHashObj->size = 0;
+ atomic_store_64(&pHashObj->size, 0);
}
void tSimpleHashCleanup(SSHashObj *pHashObj) {
- if (pHashObj == NULL) {
+ if (!pHashObj) {
return;
}
@@ -291,7 +315,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) {
}
size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
- if (pHashObj == NULL) {
+ if (!pHashObj) {
return 0;
}
@@ -299,11 +323,58 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
}
void *tSimpleHashGetKey(const SSHashObj *pHashObj, void *data, size_t *keyLen) {
+#if 0
int32_t offset = offsetof(SHNode, data);
SHNode *node = ((SHNode *)(char *)data - offset);
- if (keyLen != NULL) {
+ if (keyLen) {
*keyLen = pHashObj->keyLen;
}
+ return POINTER_SHIFT(data, pHashObj->dataLen);
+
return GET_SHASH_NODE_KEY(node, pHashObj->dataLen);
+#endif
+ if (keyLen) {
+ *keyLen = pHashObj->keyLen;
+ }
+
+ return POINTER_SHIFT(data, pHashObj->dataLen);
+}
+
+void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
+ if (!pHashObj) {
+ return NULL;
+ }
+
+ SHNode *pNode = NULL;
+
+ if (!data) {
+ for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+ pNode = pHashObj->hashList[i];
+ if (!pNode) {
+ continue;
+ }
+ *iter = i;
+ return GET_SHASH_NODE_DATA(pNode);
+ }
+ return NULL;
+ }
+
+ pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
+
+ if (pNode->next) {
+ return GET_SHASH_NODE_DATA(pNode->next);
+ }
+
+ ++(*iter);
+ for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
+ pNode = pHashObj->hashList[i];
+ if (!pNode) {
+ continue;
+ }
+ *iter = i;
+ return GET_SHASH_NODE_DATA(pNode);
+ }
+
+ return NULL;
}
\ No newline at end of file
diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt
index acab27ec0876b881dc72aca67927ea3359ef9d57..18ca95481352e1bac61cef21eacc53bb4b94d39d 100644
--- a/source/libs/executor/test/CMakeLists.txt
+++ b/source/libs/executor/test/CMakeLists.txt
@@ -17,4 +17,19 @@ IF(NOT TD_DARWIN)
PUBLIC "${TD_SOURCE_DIR}/include/libs/executor/"
PRIVATE "${TD_SOURCE_DIR}/source/libs/executor/inc"
)
-ENDIF ()
\ No newline at end of file
+ENDIF ()
+
+# SET(CMAKE_CXX_STANDARD 11)
+# AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
+
+# ADD_EXECUTABLE(tSimpleHashTest tSimpleHashTests.cpp)
+# TARGET_LINK_LIBRARIES(
+# tSimpleHashTest
+# PRIVATE os util common executor gtest_main
+# )
+
+# TARGET_INCLUDE_DIRECTORIES(
+# tSimpleHashTest
+# PUBLIC "${TD_SOURCE_DIR}/include/common"
+# PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+# )
\ No newline at end of file
diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a17a7146eabd55914b0143de55ddf0a732cac162
--- /dev/null
+++ b/source/libs/executor/test/tSimpleHashTests.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+#include "taos.h"
+#include "thash.h"
+#include "tsimplehash.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+
+// int main(int argc, char **argv) {
+// testing::InitGoogleTest(&argc, argv);
+// return RUN_ALL_TESTS();
+// }
+
+TEST(testCase, tSimpleHashTest) {
+ SSHashObj *pHashObj =
+ tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), sizeof(int64_t), sizeof(int64_t));
+
+ assert(pHashObj != nullptr);
+
+ ASSERT_EQ(0, tSimpleHashGetSize(pHashObj));
+
+ int64_t originKeySum = 0;
+ for (int64_t i = 1; i <= 100; ++i) {
+ originKeySum += i;
+ tSimpleHashPut(pHashObj, (const void *)&i, (const void *)&i);
+ ASSERT_EQ(i, tSimpleHashGetSize(pHashObj));
+ }
+
+ for (int64_t i = 1; i <= 100; ++i) {
+ void *data = tSimpleHashGet(pHashObj, (const void *)&i);
+ ASSERT_EQ(i, *(int64_t *)data);
+ }
+
+
+ void *data = NULL;
+ int32_t iter = 0;
+ int64_t keySum = 0;
+ int64_t dataSum = 0;
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(pHashObj, data, NULL);
+ keySum += *(int64_t *)key;
+ dataSum += *(int64_t *)data;
+ }
+
+ ASSERT_EQ(keySum, dataSum);
+ ASSERT_EQ(keySum, originKeySum);
+
+ for (int64_t i = 1; i <= 100; ++i) {
+ tSimpleHashRemove(pHashObj, (const void *)&i);
+ ASSERT_EQ(100 - i, tSimpleHashGetSize(pHashObj));
+ }
+
+ tSimpleHashCleanup(pHashObj);
+}
+
+#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 4743a9aa9a2f3e3b97cd97d0011aaaa45fbef53d..be4ac404fd866665a3f1a98fd082d72cb038757b 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1350,6 +1350,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType);
pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType);
pSelect->hasTimeLineFunc = pSelect->hasTimeLineFunc ? true : fmIsTimelineFunc(pFunc->funcId);
+ pSelect->hasUdaf = pSelect->hasUdaf ? true : fmIsUserDefinedFunc(pFunc->funcId) && fmIsAggFunc(pFunc->funcId);
pSelect->onlyHasKeepOrderFunc = pSelect->onlyHasKeepOrderFunc ? fmIsKeepOrderFunc(pFunc->funcId) : false;
}
}
@@ -2644,6 +2645,11 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
return TSDB_CODE_SUCCESS;
}
+ if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
+ "Missing RANGE clause, EVERY clause or FILL clause");
+ }
+
int32_t code = translateExpr(pCxt, &pSelect->pRange);
if (TSDB_CODE_SUCCESS == code) {
code = translateExpr(pCxt, &pSelect->pEvery);
@@ -4734,6 +4740,11 @@ static bool crossTableWithoutAggOper(SSelectStmt* pSelect) {
!isPartitionByTbname(pSelect->pPartitionByList);
}
+static bool crossTableWithUdaf(SSelectStmt* pSelect) {
+ return pSelect->hasUdaf && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType &&
+ !isPartitionByTbname(pSelect->pPartitionByList);
+}
+
static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
if (NULL != pStmt->pOptions->pWatermark &&
(DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) {
@@ -4785,7 +4796,8 @@ static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) {
static int32_t checkStreamQuery(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type ||
- !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect)) {
+ !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
+ crossTableWithUdaf(pSelect)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index 5b222a8deca0d7f22a27286f903810004fda9c37..716dd7ffc000c5995a1121314825c6f1081d7079 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -294,16 +294,6 @@ TEST_F(ParserSelectTest, intervalSemanticCheck) {
TEST_F(ParserSelectTest, interp) {
useDb("root", "test");
- run("SELECT INTERP(c1) FROM t1");
-
- run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00')");
-
- run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') FILL(LINEAR)");
-
- run("SELECT INTERP(c1) FROM t1 EVERY(5s)");
-
- run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s)");
-
run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index f4f7c9aefdfe64f5e14aa02a4adecc1129683080..45ab3903a9e9eb6df844244b6fc7cd8d009ebd47 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -1615,6 +1615,9 @@ static int32_t partTagsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) {
TSWAP(((SPartitionLogicNode*)pNode)->pPartitionKeys, pScan->pGroupTags);
int32_t code = replaceLogicNode(pLogicSubplan, pNode, (SLogicNode*)pScan);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = adjustLogicNodeDataRequirement((SLogicNode*)pScan, pNode->resultDataOrder);
+ }
if (TSDB_CODE_SUCCESS == code) {
NODES_CLEAR_LIST(pNode->pChildren);
nodesDestroyNode((SNode*)pNode);
diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp
index d7c947a20dce65be73e8be97172dbcfa5c00a70d..27ec409d52a912834ae6e3ec6e2e6a41f2812fe1 100644
--- a/source/libs/planner/test/planBasicTest.cpp
+++ b/source/libs/planner/test/planBasicTest.cpp
@@ -93,8 +93,6 @@ TEST_F(PlanBasicTest, tailFunc) {
TEST_F(PlanBasicTest, interpFunc) {
useDb("root", "test");
- run("SELECT INTERP(c1) FROM t1");
-
run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}
diff --git a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
index cc1d507df2c20200cd8ad77825e072471bc61779..2ff01263a45d7c6eeac3cf1bc553f567725fc2b5 100644
--- a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
+++ b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
@@ -61,6 +61,7 @@ endi
print =============== select * from stb from memory in designated vgroup
sql select _wstart, _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m);
print $data00 $data01 $data02 $data03 $data04
+print $data10 $data11 $data12 $data13 $data14
if $rows != 1 then
print rows $rows != 1
return -1
diff --git a/tests/system-test/2-query/Now.py b/tests/system-test/2-query/Now.py
index 386c8b9d31c00a104f335ec5bb61e5045056e4c5..35291856f2389a1aad47cdae8ccd762bde9f8f0f 100644
--- a/tests/system-test/2-query/Now.py
+++ b/tests/system-test/2-query/Now.py
@@ -9,12 +9,13 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(),True)
+ tdSql.init(conn.cursor())
self.setsql = TDSetSql()
+ self.dbname = 'db'
# name of normal table
- self.ntbname = 'ntb'
+ self.ntbname = f'{self.dbname}.ntb'
# name of stable
- self.stbname = 'stb'
+ self.stbname = f'{self.dbname}.stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
@@ -72,19 +73,19 @@ class TDTestCase:
def now_check_ntb(self):
for time_unit in self.db_percision:
- tdSql.execute(f'create database db precision "{time_unit}"')
- tdSql.execute('use db')
+ tdSql.execute(f'create database {self.dbname} precision "{time_unit}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
for value in self.values_list:
tdSql.execute(
f'insert into {self.ntbname} values({value})')
self.data_check(self.ntbname,'normal table')
- tdSql.execute('drop database db')
+ tdSql.execute(f'drop database {self.dbname}')
def now_check_stb(self):
for time_unit in self.db_percision:
- tdSql.execute(f'create database db precision "{time_unit}"')
- tdSql.execute('use db')
+ tdSql.execute(f'create database {self.dbname} precision "{time_unit}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})")
@@ -93,7 +94,7 @@ class TDTestCase:
for i in range(self.tbnum):
self.data_check(f'{self.stbname}_{i}','child table')
self.data_check(self.stbname,'stable')
- tdSql.execute('drop database db')
+ tdSql.execute(f'drop database {self.dbname}')
def run(self): # sourcery skip: extract-duplicate-method
self.now_check_ntb()
diff --git a/tests/system-test/2-query/Timediff.py b/tests/system-test/2-query/Timediff.py
index 9c595a8c8c20b6fc7821fbef2a95db66396c9e36..d9bac2e9306843f1a46c5d77ad6e3c0130d84c37 100644
--- a/tests/system-test/2-query/Timediff.py
+++ b/tests/system-test/2-query/Timediff.py
@@ -19,9 +19,10 @@ class TDTestCase:
self.db_param_precision = ['ms','us','ns']
self.time_unit = ['1w','1d','1h','1m','1s','1a','1u','1b']
self.error_unit = ['2w','2d','2h','2m','2s','2a','2u','1c','#1']
- self.ntbname = 'ntb'
- self.stbname = 'stb'
- self.ctbname = 'ctb'
+ self.dbname = 'db'
+ self.ntbname = f'{self.dbname}.ntb'
+ self.stbname = f'{self.dbname}.stb'
+ self.ctbname = f'{self.dbname}.ctb'
self.subtractor = 1 # unit:s
def check_tbtype(self,tb_type):
if tb_type.lower() == 'ntb':
@@ -139,9 +140,9 @@ class TDTestCase:
tdSql.error(f'select timediff(c0,{self.subtractor},{unit}) from {self.ntbname}')
def function_check_ntb(self):
for precision in self.db_param_precision:
- tdSql.execute('drop database if exists db')
- tdSql.execute(f'create database db precision "{precision}"')
- tdSql.execute('use db')
+ tdSql.execute(f'drop database if exists {self.dbname}')
+ tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
@@ -151,9 +152,9 @@ class TDTestCase:
self.data_check(date_time,precision,'ntb')
def function_check_stb(self):
for precision in self.db_param_precision:
- tdSql.execute('drop database if exists db')
- tdSql.execute(f'create database db precision "{precision}"')
- tdSql.execute('use db')
+ tdSql.execute(f'drop database if exists {self.dbname}')
+ tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)')
for ts in self.ts_str:
diff --git a/tests/system-test/2-query/To_iso8601.py b/tests/system-test/2-query/To_iso8601.py
index a80978bcda87171d1135178e99854abc8b920667..ccc26e9b1b4efc14570d002711d8054b46a41fce 100644
--- a/tests/system-test/2-query/To_iso8601.py
+++ b/tests/system-test/2-query/To_iso8601.py
@@ -15,19 +15,20 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1640966400000 # 2022-1-1 00:00:00.000
+ self.dbname = 'db'
+ self.stbname = f'{self.dbname}.stb'
+ self.ntbname = f'{self.dbname}.ntb'
def check_customize_param_ms(self):
-
time_zone = time.strftime('%z')
- tdSql.execute('create database db1 precision "ms"')
- tdSql.execute('use db1')
- tdSql.execute('create table if not exists ntb(ts timestamp, c1 int, c2 timestamp)')
+ tdSql.execute(f'drop database if exists {self.dbname}')
+ tdSql.execute(f'create database {self.dbname} precision "ms"')
+ tdSql.execute(f'use {self.dbname}')
+ tdSql.execute(f'create table if not exists {self.ntbname}(ts timestamp, c1 int, c2 timestamp)')
for i in range(self.rowNum):
- tdSql.execute("insert into ntb values(%d, %d, %d)"
- % (self.ts + i, i + 1, self.ts + i))
- tdSql.query('select to_iso8601(ts) from ntb')
+ tdSql.execute(f"insert into {self.ntbname} values({self.ts + i}, {i + 1}, {self.ts + i})")
+ tdSql.query(f'select to_iso8601(ts) from {self.ntbname}')
for i in range(self.rowNum):
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{time_zone}')
-
timezone_list = ['+0000','+0100','+0200','+0300','+0330','+0400','+0500','+0530','+0600','+0700','+0800','+0900','+1000','+1100','+1200',\
'+00','+01','+02','+03','+04','+05','+06','+07','+08','+09','+10','+11','+12',\
'+00:00','+01:00','+02:00','+03:00','+03:30','+04:00','+05:00','+05:30','+06:00','+07:00','+08:00','+09:00','+10:00','+11:00','+12:00',\
@@ -36,62 +37,49 @@ class TDTestCase:
'-00:00','-01:00','-02:00','-03:00','-04:00','-05:00','-06:00','-07:00','-08:00','-09:00','-10:00','-11:00','-12:00',\
'z','Z']
for j in timezone_list:
- tdSql.query(f'select to_iso8601(ts,"{j}") from ntb')
+ tdSql.query(f'select to_iso8601(ts,"{j}") from {self.ntbname}')
for i in range(self.rowNum):
tdSql.checkEqual(tdSql.queryResult[i][0],f'2022-01-01T00:00:00.00{i}{j}')
-
error_param_list = [0,100.5,'a','!']
for i in error_param_list:
- tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
- #! bug TD-16372:对于错误的时区,缺少校验
+ tdSql.error(f'select to_iso8601(ts,"{i}") from {self.ntbname}')
error_timezone_param = ['+13','-13','+1300','-1300','+0001','-0001','-0330','-0530']
for i in error_timezone_param:
- tdSql.error(f'select to_iso8601(ts,"{i}") from ntb')
+ tdSql.error(f'select to_iso8601(ts,"{i}") from {self.ntbname}')
def check_base_function(self):
tdSql.prepare()
- tdLog.printNoPrefix("==========step1:create tables==========")
- tdSql.execute('create table if not exists ntb(ts timestamp, c1 int, c2 float,c3 double,c4 timestamp)')
- tdSql.execute('create table if not exists stb(ts timestamp, c1 int, c2 float,c3 double,c4 timestamp) tags(t0 int)')
- tdSql.execute('create table if not exists stb_1 using stb tags(100)')
-
- tdLog.printNoPrefix("==========step2:insert data==========")
- tdSql.execute('insert into ntb values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
- tdSql.execute('insert into stb_1 values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
-
- tdSql.query("select to_iso8601(ts) from ntb")
+ tdSql.execute('create table if not exists db.ntb(ts timestamp, c1 int, c2 float,c3 double,c4 timestamp)')
+ tdSql.execute('create table if not exists db.stb(ts timestamp, c1 int, c2 float,c3 double,c4 timestamp) tags(t0 int)')
+ tdSql.execute('create table if not exists db.stb_1 using db.stb tags(100)')
+ tdSql.execute('insert into db.ntb values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
+ tdSql.execute('insert into db.stb_1 values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
+ tdSql.query("select to_iso8601(ts) from db.ntb")
tdSql.checkRows(3)
- tdSql.query("select c1 from ntb where ts = to_iso8601(1577808000000)")
+ tdSql.query("select c1 from db.ntb where ts = to_iso8601(1577808000000)")
tdSql.checkRows(1)
tdSql.checkData(0,0,10)
- tdSql.query("select * from ntb where ts = to_iso8601(1577808000000)")
+ tdSql.query("select * from db.ntb where ts = to_iso8601(1577808000000)")
tdSql.checkRows(1)
- tdSql.query("select to_iso8601(ts) from ntb where ts=today()")
+ tdSql.query("select to_iso8601(ts) from db.ntb where ts=today()")
tdSql.checkRows(1)
for i in range(0,3):
- tdSql.query("select to_iso8601(1) from ntb")
+ tdSql.query("select to_iso8601(1) from db.ntb")
tdSql.checkData(i,0,"1970-01-01T08:00:01+0800")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(ts) from ntb")
- tdSql.checkRows(3)
tdSql.query("select to_iso8601(ts) from db.ntb")
-
- tdSql.query("select to_iso8601(today()) from ntb")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(now()) from ntb")
+ tdSql.query("select to_iso8601(today()) from db.ntb")
tdSql.checkRows(3)
-
- tdSql.error("select to_iso8601(timezone()) from ntb")
- tdSql.error("select to_iso8601('abc') from ntb")
-
+ tdSql.query("select to_iso8601(now()) from db.ntb")
+ tdSql.checkRows(3)
+ tdSql.error("select to_iso8601(timezone()) from db.ntb")
+ tdSql.error("select to_iso8601('abc') from db.ntb")
for i in ['+','-','*','/']:
- tdSql.query(f"select to_iso8601(today()) {i}null from ntb")
- tdSql.checkRows(3)
- tdSql.checkData(0,0,None)
tdSql.query(f"select to_iso8601(today()) {i}null from db.ntb")
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
- tdSql.query("select to_iso8601(9223372036854775807) from ntb")
+ tdSql.query("select to_iso8601(9223372036854775807) from db.ntb")
tdSql.checkRows(3)
# bug TD-15207
# tdSql.query("select to_iso8601(10000000000) from ntb")
@@ -102,27 +90,22 @@ class TDTestCase:
# tdSql.checkData(0,0,None)
err_param = [1.5,'a','c2']
for i in err_param:
- tdSql.error(f"select to_iso8601({i}) from ntb")
tdSql.error(f"select to_iso8601({i}) from db.ntb")
-
- tdSql.query("select to_iso8601(now) from stb")
+ tdSql.query("select to_iso8601(now) from db.stb")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(now()) from stb")
+ tdSql.query("select to_iso8601(now()) from db.stb")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(1) from stb")
+ tdSql.query("select to_iso8601(1) from db.stb")
for i in range(0,3):
tdSql.checkData(i,0,"1970-01-01T08:00:01+0800")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(ts) from stb")
+ tdSql.query("select to_iso8601(ts) from db.stb")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(ts)+1 from stb")
+ tdSql.query("select to_iso8601(ts)+1 from db.stb")
tdSql.checkRows(3)
- tdSql.query("select to_iso8601(ts)+'a' from stb ")
+ tdSql.query("select to_iso8601(ts)+'a' from db.stb ")
tdSql.checkRows(3)
for i in ['+','-','*','/']:
- tdSql.query(f"select to_iso8601(today()) {i}null from stb")
- tdSql.checkRows(3)
- tdSql.checkData(0,0,None)
tdSql.query(f"select to_iso8601(today()) {i}null from db.stb")
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
diff --git a/tests/system-test/2-query/To_unixtimestamp.py b/tests/system-test/2-query/To_unixtimestamp.py
index 60d5cc7b72f76f46b36c510f8f08f69dbb57c858..64f7b18e4102e39fe5171df902babab10cb78640 100644
--- a/tests/system-test/2-query/To_unixtimestamp.py
+++ b/tests/system-test/2-query/To_unixtimestamp.py
@@ -3,6 +3,7 @@ from time import sleep
from util.log import *
from util.sql import *
from util.cases import *
+from util.sqlset import TDSetSql
@@ -12,17 +13,19 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+ self.dbname = 'db'
# name of normal table
- self.ntbname = 'ntb'
+ self.ntbname = f'{self.dbname}.ntb'
# name of stable
- self.stbname = 'stb'
+ self.stbname = f'{self.dbname}.stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
'c1':'int',
'c2':'float',
- 'c3':'binary(20)',
- 'c4':'nchar(20)'
+ 'c3':'binary(20)'
+
}
# structure of tag
self.tag_dict = {
@@ -32,69 +35,68 @@ class TDTestCase:
self.tbnum = 2
# values of tag,the number of values should equal to tbnum
self.tag_values = [
- f'10',
- f'100'
+ '10',
+ '100'
]
# values of rows, structure should be same as column
self.values_list = [
- f'now,10,99.99,"2020-1-1 00:00:00"',
- f'today(),100,11.111,22.222222'
+ f'now,10,99.99,"abc"',
+ f'today(),100,11.111,"abc"'
]
- self.error_param = [1,'now()']
-
+ self.error_param = [1,1.5,'now()']
+ def data_check(self,tbname,values_list,tb_type,tb_num=1):
+ for time in ['1970-01-01T08:00:00+0800','1970-01-01T08:00:00+08:00']:
+ tdSql.query(f"select to_unixtimestamp('{time}') from {tbname}")
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(len(values_list))
+ for i in range(len(values_list)):
+ tdSql.checkEqual(tdSql.queryResult[i][0],0)
+ elif tb_type == 'stb':
+ tdSql.checkRows(len(self.values_list)*tb_num)
+ for time in ['1900-01-01T08:00:00+08:00']:
+ tdSql.query(f"select to_unixtimestamp('{time}') from {tbname}")
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(len(values_list))
+ elif tb_type == 'stb':
+ tdSql.checkRows(len(self.values_list)*tb_num)
+ for time in ['2020-01-32T08:00:00','2020-13-32T08:00:00','acd']:
+ tdSql.query(f"select to_unixtimestamp('{time}') from {tbname}")
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(len(values_list))
+ for i in range(len(values_list)):
+ tdSql.checkEqual(tdSql.queryResult[i][0],None)
+ elif tb_type == 'stb':
+ tdSql.checkRows(len(values_list)*tb_num)
+ for i in self.column_dict.keys():
+ tdSql.query(f"select {i} from {tbname} where to_unixtimestamp('1970-01-01T08:00:00+08:00')=0")
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(len(values_list))
+ elif tb_type == 'stb':
+ tdSql.checkRows(len(values_list)*tb_num)
+ for time in self.error_param:
+ tdSql.error(f"select to_unixtimestamp({time}) from {tbname}")
+ def timestamp_change_check_ntb(self):
+ tdSql.execute(f'create database {self.dbname}')
+ tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
+ for i in range(len(self.values_list)):
+ tdSql.execute(f'insert into {self.ntbname} values({self.values_list[i]})')
+ self.data_check(self.ntbname,self.values_list,'ntb')
+ tdSql.execute(f'drop database {self.dbname}')
+ def timestamp_change_check_stb(self):
+ tdSql.execute(f'create database {self.dbname}')
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
+ for i in range(self.tbnum):
+ tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})')
+ for j in range(len(self.values_list)):
+ tdSql.execute(f'insert into {self.stbname}_{i} values({self.values_list[j]})')
+ for i in range(self.tbnum):
+ self.data_check(f'{self.stbname}_{i}',self.values_list,'ctb')
+ self.data_check(self.stbname,self.values_list,'stb',self.tbnum)
+ tdSql.execute(f'drop database {self.dbname}')
def run(self): # sourcery skip: extract-duplicate-method
- tdSql.prepare()
- tdLog.printNoPrefix("==========step1:create tables==========")
- tdSql.execute(
- '''create table if not exists ntb
- (ts timestamp, c1 int, c2 float,c3 double,c4 timestamp)
- '''
- )
- tdSql.execute(
- '''create table if not exists stb
- (ts timestamp, c1 int, c2 float,c3 double,c4 timestamp) tags(t0 int)
- '''
- )
- tdSql.execute(
- '''create table if not exists stb_1 using stb tags(100)
- '''
- )
- tdLog.printNoPrefix("==========step2:insert data into ntb==========")
-
- # RFC3339:2020-01-01T00:00:00+8:00
- # ISO8601:2020-01-01T00:00:00.000+0800
- tdSql.execute(
- 'insert into ntb values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
- tdSql.execute(
- 'insert into stb_1 values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
- tdSql.query("select to_unixtimestamp('1970-01-01T08:00:00+0800') from ntb")
- tdSql.checkData(0,0,0)
- tdSql.checkData(1,0,0)
- tdSql.checkData(2,0,0)
- tdSql.checkRows(3)
- tdSql.query("select to_unixtimestamp('1970-01-01T08:00:00+08:00') from ntb")
- tdSql.checkData(0,0,0)
- tdSql.checkRows(3)
- tdSql.query("select to_unixtimestamp('1900-01-01T08:00:00+08:00') from ntb")
- tdSql.checkRows(3)
- tdSql.query("select to_unixtimestamp('2020-01-32T08:00:00') from ntb")
- tdSql.checkRows(3)
- tdSql.checkData(0,0,None)
- tdSql.query("select to_unixtimestamp('2020-13-32T08:00:00') from ntb")
- tdSql.checkRows(3)
- tdSql.checkData(0,0,None)
- tdSql.query("select to_unixtimestamp('acd') from ntb")
- tdSql.checkRows(3)
- tdSql.checkData(0,0,None)
- tdSql.error("select to_unixtimestamp(1) from ntb")
- tdSql.error("select to_unixtimestamp(1.5) from ntb")
- tdSql.error("select to_unixtimestamp(ts) from ntb")
-
- tdSql.query("select ts from ntb where to_unixtimestamp('1970-01-01T08:00:00+08:00')=0")
- tdSql.checkRows(3)
-
-
+ self.timestamp_change_check_ntb()
+ self.timestamp_change_check_stb()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/2-query/Today.py b/tests/system-test/2-query/Today.py
index 43c5263af6dcb91c1b7700ce899ae1041f93f5fa..6887e7397034b2f99ad581a1fde412bca87c8007 100644
--- a/tests/system-test/2-query/Today.py
+++ b/tests/system-test/2-query/Today.py
@@ -20,8 +20,9 @@ class TDTestCase:
self.arithmetic_operators = ['+','-','*','/']
self.relational_operator = ['<','<=','=','>=','>']
# prepare data
- self.ntbname = 'ntb'
- self.stbname = 'stb'
+ self.dbname = 'db'
+ self.ntbname = f'{self.dbname}.ntb'
+ self.stbname = f'{self.dbname}.stb'
self.column_dict = {
'ts':'timestamp',
'c1':'int',
@@ -96,7 +97,7 @@ class TDTestCase:
tdSql.checkRows(num_same*tb_num)
elif tb == 'stb':
tdSql.checkRows(num_same)
- for i in [f'{tbname}',f'db.{tbname}']:
+ for i in [f'{tbname}']:
for unit in self.time_unit:
for symbol in ['+','-']:
tdSql.query(f"select today() {symbol}1{unit} from {i}")
@@ -148,20 +149,20 @@ class TDTestCase:
tdSql.checkData(i, 0, str(self.today_date))
def today_check_ntb(self):
for time_unit in self.db_percision:
- print(time_unit)
- tdSql.execute(f'create database db precision "{time_unit}"')
- tdSql.execute('use db')
+
+ tdSql.execute(f'create database {self.dbname} precision "{time_unit}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.set_create_normaltable_sql(self.ntbname,self.column_dict))
for i in self.values_list:
tdSql.execute(
f'insert into {self.ntbname} values({i})')
self.data_check(self.column_dict,self.ntbname,self.values_list,1,'tb',time_unit)
- tdSql.execute('drop database db')
+ tdSql.execute(f'drop database {self.dbname}')
def today_check_stb_tb(self):
for time_unit in self.db_percision:
- print(time_unit)
- tdSql.execute(f'create database db precision "{time_unit}"')
- tdSql.execute('use db')
+
+ tdSql.execute(f'create database {self.dbname} precision "{time_unit}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f'create table if not exists {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})')
@@ -172,7 +173,7 @@ class TDTestCase:
self.data_check(self.column_dict,f'{self.stbname}_{i}',self.values_list,1,'tb',time_unit)
# check stable
self.data_check(self.column_dict,self.stbname,self.values_list,self.tbnum,'stb',time_unit)
- tdSql.execute('drop database db')
+ tdSql.execute(f'drop database {self.dbname}')
def run(self): # sourcery skip: extract-duplicate-method
diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py
index bae77b582cec07f9ec4cdbaa9a910003d03394de..afc7ed36a56aa80a93638d38874f02932a203fb8 100644
--- a/tests/system-test/2-query/last.py
+++ b/tests/system-test/2-query/last.py
@@ -37,7 +37,7 @@ class TDTestCase:
def last_check_stb_tb_base(self):
tdSql.prepare()
- stbname = tdCom.getLongName(5, "letters")
+ stbname = f'db.{tdCom.getLongName(5, "letters")}'
column_dict = {
'col1': 'tinyint',
'col2': 'smallint',
@@ -61,7 +61,7 @@ class TDTestCase:
tdSql.execute(f"create table {stbname}_1 using {stbname} tags('beijing')")
tdSql.execute(f"insert into {stbname}_1(ts) values(%d)" % (self.ts - 1))
- for i in [f'{stbname}_1', f'db.{stbname}_1']:
+ for i in [f'{stbname}_1']:
tdSql.query(f"select last(*) from {i}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, None)
@@ -71,7 +71,7 @@ class TDTestCase:
# tdSql.checkRows(1)
# tdSql.checkData(0, 1, None)
for i in column_dict.keys():
- for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
+ for j in [f'{stbname}_1', f'{stbname}']:
tdSql.query(f"select last({i}) from {j}")
tdSql.checkRows(0)
tdSql.query(f"select last({list(column_dict.keys())[0]}) from {stbname}_1 group by {list(column_dict.keys())[-1]}")
@@ -79,12 +79,12 @@ class TDTestCase:
for i in range(self.rowNum):
tdSql.execute(f"insert into {stbname}_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
- for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
+ for i in [f'{stbname}_1',f'{stbname}']:
tdSql.query(f"select last(*) from {i}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 10)
for k, v in column_dict.items():
- for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
+ for j in [f'{stbname}_1', f'{stbname}']:
tdSql.query(f"select last({k}) from {j}")
tdSql.checkRows(1)
# tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
@@ -103,7 +103,7 @@ class TDTestCase:
# nchar
elif 'nchar' in v.lower():
tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}')
- for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']:
+ for i in [f'{stbname}_1', f'{stbname}']:
tdSql.query(f"select last({list(column_dict.keys())[0]},{list(column_dict.keys())[1]},{list(column_dict.keys())[2]}) from {stbname}_1")
tdSql.checkData(0, 2, 10)
@@ -113,7 +113,7 @@ class TDTestCase:
def last_check_ntb_base(self):
tdSql.prepare()
- ntbname = tdCom.getLongName(5, "letters")
+ ntbname = f'db.{tdCom.getLongName(5, "letters")}'
column_dict = {
'col1': 'tinyint',
'col2': 'smallint',
@@ -135,11 +135,8 @@ class TDTestCase:
tdSql.query(f"select last(*) from {ntbname}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, None)
- tdSql.query(f"select last(*) from db.{ntbname}")
- tdSql.checkRows(1)
- tdSql.checkData(0, 1, None)
for i in column_dict.keys():
- for j in [f'{ntbname}', f'db.{ntbname}']:
+ for j in [f'{ntbname}']:
tdSql.query(f"select last({i}) from {j}")
tdSql.checkRows(0)
for i in range(self.rowNum):
@@ -148,11 +145,8 @@ class TDTestCase:
tdSql.query(f"select last(*) from {ntbname}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 10)
- tdSql.query(f"select last(*) from db.{ntbname}")
- tdSql.checkRows(1)
- tdSql.checkData(0, 1, 10)
for k, v in column_dict.items():
- for j in [f'{ntbname}', f'db.{ntbname}']:
+ for j in [f'{ntbname}']:
tdSql.query(f"select last({k}) from {j}")
tdSql.checkRows(1)
# tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
@@ -178,8 +172,8 @@ class TDTestCase:
def last_check_stb_distribute(self):
# prepare data for vgroup 4
dbname = tdCom.getLongName(10, "letters")
- stbname = tdCom.getLongName(5, "letters")
- vgroup_num = 4
+ stbname = f'{dbname}.{tdCom.getLongName(5, "letters")}'
+ vgroup_num = 2
column_dict = {
'col1': 'tinyint',
'col2': 'smallint',
@@ -208,11 +202,7 @@ class TDTestCase:
f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.execute(
f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
- # for i in [f'{stbname}', f'{dbname}.{stbname}']:
- # tdSql.query(f"select last(*) from {i}")
- # tdSql.checkRows(1)
- # tdSql.checkData(0, 1, None)
- tdSql.query('show tables')
+ tdSql.query(f'show {dbname}.tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
@@ -222,20 +212,17 @@ class TDTestCase:
if vgroups_num >= 2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
continue
- # else:
- # tdLog.exit(
- # f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(self.tbnum):
for j in range(self.rowNum):
tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1))
- for i in [f'{stbname}', f'{dbname}.{stbname}']:
+ for i in [f'{stbname}']:
tdSql.query(f"select last(*) from {i}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 10)
for k, v in column_dict.items():
- for j in [f'{stbname}', f'{dbname}.{stbname}']:
+ for j in [f'{stbname}']:
tdSql.query(f"select last({k}) from {j}")
tdSql.checkRows(1)
# tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned
diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py
index 43b5c5610a3452018e32c0f3a8cd0a4ad38c521e..3b027ed6a03e9e13f8782b91c36fdc5bb558c64d 100644
--- a/tests/system-test/2-query/percentile.py
+++ b/tests/system-test/2-query/percentile.py
@@ -23,13 +23,14 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1537146000000
self.setsql = TDSetSql()
- self.ntbname = 'ntb'
- self.stbname = 'stb'
+ self.dbname = 'db'
+ self.ntbname = f'{self.dbname}.ntb'
+ self.stbname = f'{self.dbname}.stb'
self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = {
@@ -100,10 +101,9 @@ class TDTestCase:
return intData,floatData
def check_tags(self,tags,param,num,value):
tdSql.query(f'select percentile({tags}, {param}) from {self.stbname}_{num}')
- print(tdSql.queryResult)
tdSql.checkEqual(tdSql.queryResult[0][0], value)
def function_check_ntb(self):
- tdSql.prepare()
+ tdSql.execute(f'create database {self.dbname}')
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
intData,floatData = self.insert_data(self.column_dict,self.ntbname,self.rowNum)
for k,v in self.column_dict.items():
@@ -116,8 +116,9 @@ class TDTestCase:
else:
tdSql.query(f'select percentile({k}, {param}) from {self.ntbname}')
tdSql.checkData(0, 0, np.percentile(floatData, param))
+ tdSql.execute(f'drop database {self.dbname}')
def function_check_ctb(self):
- tdSql.prepare()
+ tdSql.execute(f'create database {self.dbname}')
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})")
@@ -143,7 +144,7 @@ class TDTestCase:
data_num = tdSql.queryResult[0][0]
tdSql.query(f'select percentile({k},{param}) from {self.stbname}_{i}')
tdSql.checkData(0,0,data_num)
-
+ tdSql.execute(f'drop database {self.dbname}')
def run(self):
self.function_check_ntb()
self.function_check_ctb()
diff --git a/tests/system-test/2-query/stateduration.py b/tests/system-test/2-query/stateduration.py
index 6b504468145cab70c248a3dfd84dfb0fd5195882..b6ddff4017ad61826801da9de3278be0f27d4af4 100644
--- a/tests/system-test/2-query/stateduration.py
+++ b/tests/system-test/2-query/stateduration.py
@@ -15,251 +15,328 @@ from util.log import *
from util.cases import *
from util.sql import *
+
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ts = 1537146000000
- self.param_list = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ']
+ self.param_list = ['LT', 'lt', 'Lt', 'lT', 'GT', 'gt', 'Gt', 'gT', 'LE', 'le', 'Le',
+ 'lE', 'GE', 'ge', 'Ge', 'gE', 'NE', 'ne', 'Ne', 'nE', 'EQ', 'eq', 'Eq', 'eQ']
self.row_num = 10
- def run(self):
- tdSql.prepare()
+ self.dbname = 'db'
+ self.ntbname = f'{self.dbname}.ntb'
+ self.stbname = f'{self.dbname}.stb'
+
+ def duration_check(self):
+ tdSql.execute(f'create database {self.dbname}')
# timestamp = 1ms , time_unit = 1s
- tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {self.ntbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
- tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
- integer_list = [1,2,3,4,11,12,13,14]
- float_list = [5,6]
+ tdSql.execute(f"insert into {self.ntbname} values({self.ts + i}, {i + 1}, {i + 1}, {i + 1}, {i + 1}, {i + 0.1}, {i + 0.1}, {i % 2}, 'taosdata{i + 1}', '涛思数据{i + 1}', {i + 1}, {i + 1}, {i + 1}, {i + 1})"
+ )
+ integer_list = [1, 2, 3, 4, 11, 12, 13, 14]
+ float_list = [5, 6]
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5) from test")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5) from {self.ntbname}")
tdSql.checkRows(10)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5) from test")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5) from {self.ntbname}")
tdSql.checkRows(10)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- error_column_list = ['ts','col7','col8','col9','a',1]
+ error_column_list = ['ts', 'col7', 'col8', 'col9', 'a', 1]
for i in error_column_list:
for j in self.param_list:
- tdSql.error(f"select stateduration({i},{j},5) from test")
+ tdSql.error(
+ f"select stateduration({i},{j},5) from {self.ntbname}")
- error_param_list = ['a',1]
+ error_param_list = ['a', 1]
for i in error_param_list:
- tdSql.error(f"select stateduration(col1,{i},5) from test")
-
+ tdSql.error(
+ f"select stateduration(col1,{i},5) from {self.ntbname}")
+ tdSql.execute(f'drop table {self.ntbname}')
# timestamp = 1s, time_unit =1s
- tdSql.execute('''create table test1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {self.ntbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
- tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute(f"insert into {self.ntbname} values({self.ts + i*1000}, {i + 1}, {i + 1}, {i + 1}, {i + 1}, {i + 0.1}, {i + 0.1}, {i % 2}, 'taosdata{i + 1}', '涛思数据{i + 1}', {i + 1}, {i + 1}, {i + 1}, {i + 1})"
+ )
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5) from test1")
+ tdSql.query(f"select stateduration(col{i},'{j}',5) from {self.ntbname}")
tdSql.checkRows(10)
- # print(tdSql.queryResult)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,), (5000,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1000,), (2000,), (3000,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,), (5000,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1000,), (2000,), (3000,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5) from test1")
+ tdSql.query(f"select stateduration(col{i},'{j}',5) from {self.ntbname}")
tdSql.checkRows(10)
print(tdSql.queryResult)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1000,), (2000,), (3000,), (4000,), (5000,), (6000,), (7000,), (8000,), (9000,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
-
-
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1000,), (2000,), (3000,), (4000,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1000,), (2000,), (3000,), (4000,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1000,), (2000,), (3000,), (4000,), (5000,), (6000,), (7000,), (8000,), (9000,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ tdSql.execute(f'drop table {self.ntbname}')
# timestamp = 1m, time_unit =1m
- tdSql.execute('''create table test2(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {self.ntbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
- tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i*1000*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute(f"insert into {self.ntbname} values({self.ts + i*1000*60}, {i + 1}, {i + 1}, {i + 1}, {i + 1}, {i + 0.1},{i + 0.1}, {i % 2}, 'taosdata{i + 1}', '涛思数据{i + 1}',{i + 1}, {i + 1}, {i + 1}, {i + 1})"
+ )
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1m) from {self.ntbname}")
tdSql.checkRows(10)
- # print(tdSql.queryResult)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test2")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1m) from {self.ntbname}")
tdSql.checkRows(10)
print(tdSql.queryResult)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
-
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ tdSql.execute(f'drop table {self.ntbname}')
# timestamp = 1h, time_unit =1h
- tdSql.execute('''create table test3(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {self.ntbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.row_num):
- tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute(f"insert into {self.ntbname} values({self.ts + i*1000*60*60}, {i + 1}, {i + 1}, {i + 1}, {i + 1},{i + 0.1}, {i + 0.1}, {i % 2}, 'taosdata{i + 1}', '涛思数据{i + 1}', {i + 1}, {i + 1}, {i + 1}, {i + 1})"
+ )
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1h) from {self.ntbname}")
tdSql.checkRows(10)
- # print(tdSql.queryResult)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from test3")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1h) from {self.ntbname}")
tdSql.checkRows(10)
print(tdSql.queryResult)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
# timestamp = 1h,time_unit =1m
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1m) from {self.ntbname}")
tdSql.checkRows(10)
- # print(tdSql.queryResult)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,), (300,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (-1,), (0,), (60,), (120,), (180,), (240,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (60,), (120,), (180,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,), (300,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (60,), (120,), (180,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1m) from test3")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1m) from {self.ntbname}")
tdSql.checkRows(10)
print(tdSql.queryResult)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (60,), (120,), (180,), (240,), (300,), (360,), (420,), (480,), (540,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (60,), (120,), (180,), (240,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (60,), (120,), (180,), (240,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (60,), (120,), (180,), (240,), (300,), (360,), (420,), (480,), (540,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
# for stb
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+
+ tdSql.execute(f'''create table {self.stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(t0 int)''')
- tdSql.execute('create table stb_1 using stb tags(1)')
+ tdSql.execute(f'create table {self.stbname}_1 using {self.stbname} tags(1)')
for i in range(self.row_num):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
- % (self.ts + i*1000*60*60, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
+ tdSql.execute(f"insert into {self.stbname}_1 values({self.ts + i*1000*60*60}, {i + 1}, {i + 1},{ i + 1}, {i + 1}, {i + 0.1}, {i + 0.1}, {i % 2},'taosdata{i + 1}', '涛思数据{i + 1}', {i + 1}, {i + 1}, {i + 1}, {i + 1})"
+ )
for i in integer_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1h) from {self.stbname}")
tdSql.checkRows(10)
# print(tdSql.queryResult)
- if j in ['LT' ,'lt','Lt','lT']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GT','gt', 'Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in [ 'GE','ge','Ge','gE']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,), (5,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (0,), (-1,), (-1,), (-1,), (-1,), (-1,)])
for i in float_list:
for j in self.param_list:
- tdSql.query(f"select stateduration(col{i},'{j}',5,1h) from stb")
+ tdSql.query(
+ f"select stateduration(col{i},'{j}',5,1h) from {self.stbname}")
tdSql.checkRows(10)
print(tdSql.queryResult)
- if j in ['LT','lt','Lt','lT','LE','le','Le','lE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
- elif j in ['GE','ge','Ge','gE','GT','gt','Gt','gT']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
- elif j in ['NE','ne','Ne','nE']:
- tdSql.checkEqual(tdSql.queryResult,[(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
- elif j in ['EQ','eq','Eq','eQ']:
- tdSql.checkEqual(tdSql.queryResult,[(-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ if j in ['LT', 'lt', 'Lt', 'lT', 'LE', 'le', 'Le', 'lE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ elif j in ['GE', 'ge', 'Ge', 'gE', 'GT', 'gt', 'Gt', 'gT']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (0,), (1,), (2,), (3,), (4,)])
+ elif j in ['NE', 'ne', 'Ne', 'nE']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,)])
+ elif j in ['EQ', 'eq', 'Eq', 'eQ']:
+ tdSql.checkEqual(tdSql.queryResult, [
+ (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,), (-1,)])
+ def run(self):
+ self.duration_check()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
+
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py
index ea4f963b716d45b2a2e77d8db52286442fb658a5..3551d8ee2cfb0669c23ed1754ebcb65c69e48daa 100644
--- a/tests/system-test/2-query/timetruncate.py
+++ b/tests/system-test/2-query/timetruncate.py
@@ -21,9 +21,10 @@ class TDTestCase:
self.db_param_precision = ['ms','us','ns']
self.time_unit = ['1w','1d','1h','1m','1s','1a','1u','1b']
self.error_unit = ['2w','2d','2h','2m','2s','2a','2u','1c','#1']
- self.ntbname = 'ntb'
- self.stbname = 'stb'
- self.ctbname = 'ctb'
+ self.dbname = 'db'
+ self.ntbname = f'{self.dbname}.ntb'
+ self.stbname = f'{self.dbname}.stb'
+ self.ctbname = f'{self.dbname}.ctb'
def check_ms_timestamp(self,unit,date_time):
if unit.lower() == '1a':
for i in range(len(self.ts_str)):
@@ -140,9 +141,9 @@ class TDTestCase:
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
def function_check_ntb(self):
for precision in self.db_param_precision:
- tdSql.execute('drop database if exists db')
- tdSql.execute(f'create database db precision "{precision}"')
- tdSql.execute('use db')
+ tdSql.execute(f'drop database if exists {self.dbname}')
+ tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
@@ -150,9 +151,9 @@ class TDTestCase:
self.data_check(date_time,precision,'ntb')
def function_check_stb(self):
for precision in self.db_param_precision:
- tdSql.execute('drop database if exists db')
- tdSql.execute(f'create database db precision "{precision}"')
- tdSql.execute('use db')
+ tdSql.execute(f'drop database if exists {self.dbname}')
+ tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)')
for ts in self.ts_str:
diff --git a/tests/system-test/2-query/timezone.py b/tests/system-test/2-query/timezone.py
index 3707d25c88a973ffe4c9a402318aeab14d313e03..9a4953909c20a3812629dc79b49724bdc659f895 100644
--- a/tests/system-test/2-query/timezone.py
+++ b/tests/system-test/2-query/timezone.py
@@ -17,10 +17,11 @@ class TDTestCase:
self.setsql = TDSetSql()
self.arithmetic_operators = ['+','-','*','/']
self.arithmetic_values = [0,1,100,15.5]
+ self.dbname = 'db'
# name of normal table
- self.ntbname = 'ntb'
+ self.ntbname = f'{self.dbname}.ntb'
# name of stable
- self.stbname = 'stb'
+ self.stbname = f'{self.dbname}.stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
@@ -60,7 +61,6 @@ class TDTestCase:
time_zone_1 = os.popen('ls -l /etc/localtime|awk -F/ \'{print $(NF-1) "/" $NF}\'').read().strip()
time_zone_2 = os.popen('date "+(%Z, %z)"').read().strip()
time_zone = time_zone_1 + " " + time_zone_2
- print("expected time zone: " + time_zone)
return time_zone
def tb_type_check(self,tb_type):
@@ -94,7 +94,7 @@ class TDTestCase:
tdSql.query(f"select * from {tbname} where timezone()='{timezone}'")
self.tb_type_check(tb_type)
def timezone_check_ntb(self,timezone):
- tdSql.prepare()
+ tdSql.execute(f'create database {self.dbname}')
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
for value in self.values_list:
tdSql.execute(
@@ -102,7 +102,7 @@ class TDTestCase:
self.data_check(timezone,self.ntbname,'normal_table')
tdSql.execute('drop database db')
def timezone_check_stb(self,timezone):
- tdSql.prepare()
+ tdSql.execute(f'create database {self.dbname}')
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f'create table if not exists {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})')
diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py
index a7a433a8977d6daeb21c944d75535da3038c9674..41d6f32cfd0771f9f339a967856de3bb0f254844 100644
--- a/tests/system-test/2-query/top.py
+++ b/tests/system-test/2-query/top.py
@@ -23,7 +23,9 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
- self.ntbname = 'ntb'
+ self.dbname = 'db'
+ self.stbname = f'{self.dbname}.stb'
+ self.ntbname = f'{self.dbname}.ntb'
self.rowNum = 10
self.tbnum = 20
self.ts = 1537146000000
@@ -96,52 +98,49 @@ class TDTestCase:
else:
tdSql.checkRows(2*self.tbnum)
def top_check_stb(self):
- dbname = tdCom.getLongName(10, "letters")
- stbname = tdCom.getLongName(5, "letters")
+
tag_dict = {
't0':'int'
}
tag_values = [
f'1'
]
- tdSql.execute(f"create database if not exists {dbname} vgroups 2")
- tdSql.execute(f'use {dbname}')
- tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict))
+ tdSql.execute(f"create database if not exists {self.dbname} vgroups 2")
+ tdSql.execute(f'use {self.dbname}')
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,tag_dict))
for i in range(self.tbnum):
- tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
- self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
- tdSql.query('show tables')
+ tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({tag_values[0]})")
+ self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum)
+ tdSql.query(f'show {self.dbname}.tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
-
vgroup_list_set = set(vgroup_list)
for i in vgroup_list_set:
vgroups_num = vgroup_list.count(i)
if vgroups_num >= 2:
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
-
else:
tdLog.exit(
'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
for i in range(self.tbnum):
- self.top_check_data(f'{stbname}_{i}','child_table')
- self.top_check_data(stbname,'stable')
- tdSql.execute(f'drop database {dbname}')
+ self.top_check_data(f'{self.stbname}_{i}','child_table')
+ self.top_check_data(self.stbname,'stable')
+ tdSql.execute(f'drop database {self.dbname}')
def top_check_ntb(self):
- tdSql.prepare()
+ tdSql.execute(f"create database if not exists {self.dbname}")
+ tdSql.execute(f'use {self.dbname}')
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
self.insert_data(self.column_dict,self.ntbname,self.rowNum)
self.top_check_data(self.ntbname,'normal_table')
- tdSql.execute('drop database db')
+ tdSql.execute(f'drop database {self.dbname}')
def run(self):
self.top_check_ntb()
self.top_check_stb()
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)